arch/x86/net/bpf_jit_comp.c (platform/kernel/linux-rpi.git)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * bpf_jit_comp.c: BPF JIT compiler
4  *
5  * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6  * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7  */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <linux/sort.h>
14 #include <asm/extable.h>
15 #include <asm/set_memory.h>
16 #include <asm/nospec-branch.h>
17 #include <asm/text-patching.h>
18
19 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
20 {
21         if (len == 1)
22                 *ptr = bytes;
23         else if (len == 2)
24                 *(u16 *)ptr = bytes;
25         else {
26                 *(u32 *)ptr = bytes;
27                 barrier();
28         }
29         return ptr + len;
30 }
31
32 #define EMIT(bytes, len) \
33         do { prog = emit_code(prog, bytes, len); } while (0)
34
35 #define EMIT1(b1)               EMIT(b1, 1)
36 #define EMIT2(b1, b2)           EMIT((b1) + ((b2) << 8), 2)
37 #define EMIT3(b1, b2, b3)       EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
38 #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
39
40 #define EMIT1_off32(b1, off) \
41         do { EMIT1(b1); EMIT(off, 4); } while (0)
42 #define EMIT2_off32(b1, b2, off) \
43         do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
44 #define EMIT3_off32(b1, b2, b3, off) \
45         do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
46 #define EMIT4_off32(b1, b2, b3, b4, off) \
47         do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
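
/*
 * The multi-byte EMIT*() helpers pack their arguments into a little-endian
 * u32, so the first argument becomes the first byte in the instruction
 * stream. For example, EMIT3(0x48, 0x89, 0xE5) emits the byte sequence
 * 48 89 e5, i.e. "mov rbp, rsp" (see emit_prologue() below).
 */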
48
49 static bool is_imm8(int value)
50 {
51         return value <= 127 && value >= -128;
52 }
53
54 static bool is_simm32(s64 value)
55 {
56         return value == (s64)(s32)value;
57 }
58
59 static bool is_uimm32(u64 value)
60 {
61         return value == (u64)(u32)value;
62 }
63
64 /* mov dst, src */
65 #define EMIT_mov(DST, SRC)                                                               \
66         do {                                                                             \
67                 if (DST != SRC)                                                          \
68                         EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
69         } while (0)
70
71 static int bpf_size_to_x86_bytes(int bpf_size)
72 {
73         if (bpf_size == BPF_W)
74                 return 4;
75         else if (bpf_size == BPF_H)
76                 return 2;
77         else if (bpf_size == BPF_B)
78                 return 1;
79         else if (bpf_size == BPF_DW)
80                 return 4; /* imm32 */
81         else
82                 return 0;
83 }
84
85 /*
86  * List of x86 cond jump opcodes (. + s8)
87  * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
88  */
89 #define X86_JB  0x72
90 #define X86_JAE 0x73
91 #define X86_JE  0x74
92 #define X86_JNE 0x75
93 #define X86_JBE 0x76
94 #define X86_JA  0x77
95 #define X86_JL  0x7C
96 #define X86_JGE 0x7D
97 #define X86_JLE 0x7E
98 #define X86_JG  0x7F
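
/*
 * Example: the short form of 'je' is 0x74 (X86_JE). Adding 0x10 and
 * prefixing with 0x0F yields 0F 84, the 6-byte 'je rel32' far form
 * (. + s32) used when a target is out of s8 range.
 */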
99
100 /* Pick a register outside of BPF range for JIT internal work */
101 #define AUX_REG (MAX_BPF_JIT_REG + 1)
102 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
103
104 /*
105  * The following table maps BPF registers to x86-64 registers.
106  *
107  * x86-64 register R12 is unused: when used as a base address
108  * register in load/store instructions it always needs an extra
109  * byte of encoding, and it is callee saved.
110  *
111  * x86-64 register R9 is not used by BPF programs, but can be used by BPF
112  * trampoline. x86-64 register R10 is used for blinding (if enabled).
113  */
114 static const int reg2hex[] = {
115         [BPF_REG_0] = 0,  /* RAX */
116         [BPF_REG_1] = 7,  /* RDI */
117         [BPF_REG_2] = 6,  /* RSI */
118         [BPF_REG_3] = 2,  /* RDX */
119         [BPF_REG_4] = 1,  /* RCX */
120         [BPF_REG_5] = 0,  /* R8  */
121         [BPF_REG_6] = 3,  /* RBX callee saved */
122         [BPF_REG_7] = 5,  /* R13 callee saved */
123         [BPF_REG_8] = 6,  /* R14 callee saved */
124         [BPF_REG_9] = 7,  /* R15 callee saved */
125         [BPF_REG_FP] = 5, /* RBP readonly */
126         [BPF_REG_AX] = 2, /* R10 temp register */
127         [AUX_REG] = 3,    /* R11 temp register */
128         [X86_REG_R9] = 1, /* R9 register, 6th function argument */
129 };
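
/*
 * The values above are the low three bits of the hardware register number;
 * the fourth bit, needed for r8..r15, is supplied via a REX prefix by
 * add_1mod()/add_2mod() below whenever is_ereg() is true. E.g. BPF_REG_7
 * maps to R13 (1101b): reg2hex holds 5 (101b) and REX provides the top bit.
 */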
130
131 static const int reg2pt_regs[] = {
132         [BPF_REG_0] = offsetof(struct pt_regs, ax),
133         [BPF_REG_1] = offsetof(struct pt_regs, di),
134         [BPF_REG_2] = offsetof(struct pt_regs, si),
135         [BPF_REG_3] = offsetof(struct pt_regs, dx),
136         [BPF_REG_4] = offsetof(struct pt_regs, cx),
137         [BPF_REG_5] = offsetof(struct pt_regs, r8),
138         [BPF_REG_6] = offsetof(struct pt_regs, bx),
139         [BPF_REG_7] = offsetof(struct pt_regs, r13),
140         [BPF_REG_8] = offsetof(struct pt_regs, r14),
141         [BPF_REG_9] = offsetof(struct pt_regs, r15),
142 };
143
144 /*
145  * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
146  * which need extra byte of encoding.
147  * rax,rcx,...,rbp have simpler encoding
148  */
149 static bool is_ereg(u32 reg)
150 {
151         return (1 << reg) & (BIT(BPF_REG_5) |
152                              BIT(AUX_REG) |
153                              BIT(BPF_REG_7) |
154                              BIT(BPF_REG_8) |
155                              BIT(BPF_REG_9) |
156                              BIT(X86_REG_R9) |
157                              BIT(BPF_REG_AX));
158 }
159
160 /*
161  * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
162  * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
163  * of encoding. al,cl,dl,bl have simpler encoding.
164  */
165 static bool is_ereg_8l(u32 reg)
166 {
167         return is_ereg(reg) ||
168             (1 << reg) & (BIT(BPF_REG_1) |
169                           BIT(BPF_REG_2) |
170                           BIT(BPF_REG_FP));
171 }
172
173 static bool is_axreg(u32 reg)
174 {
175         return reg == BPF_REG_0;
176 }
177
178 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
179 static u8 add_1mod(u8 byte, u32 reg)
180 {
181         if (is_ereg(reg))
182                 byte |= 1;
183         return byte;
184 }
185
186 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
187 {
188         if (is_ereg(r1))
189                 byte |= 1;
190         if (is_ereg(r2))
191                 byte |= 4;
192         return byte;
193 }
194
195 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
196 static u8 add_1reg(u8 byte, u32 dst_reg)
197 {
198         return byte + reg2hex[dst_reg];
199 }
200
201 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
202 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
203 {
204         return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
205 }
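
/*
 * Worked example: EMIT_mov(BPF_REG_6, BPF_REG_7), i.e. "mov rbx, r13",
 * expands to add_2mod(0x48, dst, src) = 0x4C (REX.W + REX.R for r13),
 * opcode 0x89 and add_2reg(0xC0, dst, src) = 0xC0 + 3 + (5 << 3) = 0xEB,
 * giving the byte sequence 4c 89 eb.
 */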
206
207 /* Some 1-byte opcodes for binary ALU operations */
208 static u8 simple_alu_opcodes[] = {
209         [BPF_ADD] = 0x01,
210         [BPF_SUB] = 0x29,
211         [BPF_AND] = 0x21,
212         [BPF_OR] = 0x09,
213         [BPF_XOR] = 0x31,
214         [BPF_LSH] = 0xE0,
215         [BPF_RSH] = 0xE8,
216         [BPF_ARSH] = 0xF8,
217 };
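
/*
 * The ADD/SUB/AND/OR/XOR entries are one-byte "op r/m, reg" opcodes: e.g.
 * BPF_ALU64 | BPF_ADD | BPF_X with dst_reg == BPF_REG_1 (rdi) and
 * src_reg == BPF_REG_2 (rsi) becomes 48 01 f7, "add rdi, rsi". The
 * LSH/RSH/ARSH entries are not opcodes but ModRM bases (/4, /5, /7) used
 * together with the 0xC1/0xD1/0xD3 shift opcodes further down.
 */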
218
219 static void jit_fill_hole(void *area, unsigned int size)
220 {
221         /* Fill whole space with INT3 instructions */
222         memset(area, 0xcc, size);
223 }
224
225 struct jit_context {
226         int cleanup_addr; /* Epilogue code offset */
227
228         /*
229          * Program specific offsets of labels in the code; these rely on the
230          * JIT doing at least 2 passes, recording the position on the first
231          * pass, only to generate the correct offset on the second pass.
232          */
233         int tail_call_direct_label;
234         int tail_call_indirect_label;
235 };
236
237 /* Maximum number of bytes emitted while JITing one eBPF insn */
238 #define BPF_MAX_INSN_SIZE       128
239 #define BPF_INSN_SAFETY         64
240
241 /* Number of bytes emit_patch() needs to generate instructions */
242 #define X86_PATCH_SIZE          5
243 /* Number of bytes that will be skipped on tailcall */
244 #define X86_TAIL_CALL_OFFSET    11
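/*
 * These 11 bytes are the start of the prologue emitted by emit_prologue():
 * the 5-byte patchable nop, the 2-byte "xor eax, eax" (or nop2) emitted for
 * native eBPF programs, "push rbp" (1 byte) and "mov rbp, rsp" (3 bytes).
 * A tail call jumps past them, preserving rax (the tail call count) and
 * reusing the frame set up on entry.
 */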
245
246 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
247 {
248         u8 *prog = *pprog;
249
250         if (callee_regs_used[0])
251                 EMIT1(0x53);         /* push rbx */
252         if (callee_regs_used[1])
253                 EMIT2(0x41, 0x55);   /* push r13 */
254         if (callee_regs_used[2])
255                 EMIT2(0x41, 0x56);   /* push r14 */
256         if (callee_regs_used[3])
257                 EMIT2(0x41, 0x57);   /* push r15 */
258         *pprog = prog;
259 }
260
261 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
262 {
263         u8 *prog = *pprog;
264
265         if (callee_regs_used[3])
266                 EMIT2(0x41, 0x5F);   /* pop r15 */
267         if (callee_regs_used[2])
268                 EMIT2(0x41, 0x5E);   /* pop r14 */
269         if (callee_regs_used[1])
270                 EMIT2(0x41, 0x5D);   /* pop r13 */
271         if (callee_regs_used[0])
272                 EMIT1(0x5B);         /* pop rbx */
273         *pprog = prog;
274 }
275
276 /*
277  * Emit x86-64 prologue code for BPF program.
278  * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
279  * while jumping to another program
280  */
281 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
282                           bool tail_call_reachable, bool is_subprog)
283 {
284         u8 *prog = *pprog;
285
286         /* BPF trampoline can be made to work without these nops,
287          * but let's waste 5 bytes for now and optimize later
288          */
289         memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
290         prog += X86_PATCH_SIZE;
291         if (!ebpf_from_cbpf) {
292                 if (tail_call_reachable && !is_subprog)
293                         EMIT2(0x31, 0xC0); /* xor eax, eax */
294                 else
295                         EMIT2(0x66, 0x90); /* nop2 */
296         }
297         EMIT1(0x55);             /* push rbp */
298         EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
299         /* sub rsp, rounded_stack_depth */
300         if (stack_depth)
301                 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
302         if (tail_call_reachable)
303                 EMIT1(0x50);         /* push rax */
304         *pprog = prog;
305 }
306
307 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
308 {
309         u8 *prog = *pprog;
310         s64 offset;
311
312         offset = func - (ip + X86_PATCH_SIZE);
313         if (!is_simm32(offset)) {
314                 pr_err("Target call %p is out of range\n", func);
315                 return -ERANGE;
316         }
317         EMIT1_off32(opcode, offset);
318         *pprog = prog;
319         return 0;
320 }
321
322 static int emit_call(u8 **pprog, void *func, void *ip)
323 {
324         return emit_patch(pprog, func, ip, 0xE8);
325 }
326
327 static int emit_jump(u8 **pprog, void *func, void *ip)
328 {
329         return emit_patch(pprog, func, ip, 0xE9);
330 }
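
/*
 * emit_call() and emit_jump() each produce a 5-byte instruction (opcode
 * 0xE8 for call, 0xE9 for jmp) whose rel32 is relative to the end of the
 * instruction, hence the "ip + X86_PATCH_SIZE" in emit_patch(); e.g. a
 * jump to its own address would encode rel32 = -5.
 */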
331
332 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
333                                 void *old_addr, void *new_addr,
334                                 const bool text_live)
335 {
336         const u8 *nop_insn = x86_nops[5];
337         u8 old_insn[X86_PATCH_SIZE];
338         u8 new_insn[X86_PATCH_SIZE];
339         u8 *prog;
340         int ret;
341
342         memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
343         if (old_addr) {
344                 prog = old_insn;
345                 ret = t == BPF_MOD_CALL ?
346                       emit_call(&prog, old_addr, ip) :
347                       emit_jump(&prog, old_addr, ip);
348                 if (ret)
349                         return ret;
350         }
351
352         memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
353         if (new_addr) {
354                 prog = new_insn;
355                 ret = t == BPF_MOD_CALL ?
356                       emit_call(&prog, new_addr, ip) :
357                       emit_jump(&prog, new_addr, ip);
358                 if (ret)
359                         return ret;
360         }
361
362         ret = -EBUSY;
363         mutex_lock(&text_mutex);
364         if (memcmp(ip, old_insn, X86_PATCH_SIZE))
365                 goto out;
366         ret = 1;
367         if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
368                 if (text_live)
369                         text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
370                 else
371                         memcpy(ip, new_insn, X86_PATCH_SIZE);
372                 ret = 0;
373         }
374 out:
375         mutex_unlock(&text_mutex);
376         return ret;
377 }
378
379 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
380                        void *old_addr, void *new_addr)
381 {
382         if (!is_kernel_text((long)ip) &&
383             !is_bpf_text_address((long)ip))
384                 /* BPF poking in modules is not supported */
385                 return -EINVAL;
386
387         return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
388 }
389
390 #define EMIT_LFENCE()   EMIT3(0x0F, 0xAE, 0xE8)
391
392 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
393 {
394         u8 *prog = *pprog;
395
396 #ifdef CONFIG_RETPOLINE
397         if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
398                 EMIT_LFENCE();
399                 EMIT2(0xFF, 0xE0 + reg);
400         } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
401                 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
402         } else
403 #endif
404         EMIT2(0xFF, 0xE0 + reg);
405
406         *pprog = prog;
407 }
408
409 static void emit_return(u8 **pprog, u8 *ip)
410 {
411         u8 *prog = *pprog;
412
413         if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
414                 emit_jump(&prog, &__x86_return_thunk, ip);
415         } else {
416                 EMIT1(0xC3);            /* ret */
417                 if (IS_ENABLED(CONFIG_SLS))
418                         EMIT1(0xCC);    /* int3 */
419         }
420
421         *pprog = prog;
422 }
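
/*
 * With X86_FEATURE_RETHUNK the "ret" is replaced by a direct jump to the
 * return thunk so that return-speculation mitigations (e.g. retbleed) also
 * cover JITed code; the int3 under CONFIG_SLS stops straight-line
 * speculation past a plain "ret".
 */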
423
424 /*
425  * Generate the following code:
426  *
427  * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
428  *   if (index >= array->map.max_entries)
429  *     goto out;
430  *   if (tail_call_cnt > MAX_TAIL_CALL_CNT)
431  *     goto out;
432  *   prog = array->ptrs[index];
433  *   if (prog == NULL)
434  *     goto out;
435  *   goto *(prog->bpf_func + prologue_size);
436  * out:
437  */
438 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
439                                         u32 stack_depth, u8 *ip,
440                                         struct jit_context *ctx)
441 {
442         int tcc_off = -4 - round_up(stack_depth, 8);
443         u8 *prog = *pprog, *start = *pprog;
444         int offset;
445
446         /*
447          * rdi - pointer to ctx
448          * rsi - pointer to bpf_array
449          * rdx - index in bpf_array
450          */
451
452         /*
453          * if (index >= array->map.max_entries)
454          *      goto out;
455          */
456         EMIT2(0x89, 0xD2);                        /* mov edx, edx */
457         EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
458               offsetof(struct bpf_array, map.max_entries));
459
460         offset = ctx->tail_call_indirect_label - (prog + 2 - start);
461         EMIT2(X86_JBE, offset);                   /* jbe out */
462
463         /*
464          * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
465          *      goto out;
466          */
467         EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
468         EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
469
470         offset = ctx->tail_call_indirect_label - (prog + 2 - start);
471         EMIT2(X86_JA, offset);                    /* ja out */
472         EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
473         EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */
474
475         /* prog = array->ptrs[index]; */
476         EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
477                     offsetof(struct bpf_array, ptrs));
478
479         /*
480          * if (prog == NULL)
481          *      goto out;
482          */
483         EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
484
485         offset = ctx->tail_call_indirect_label - (prog + 2 - start);
486         EMIT2(X86_JE, offset);                    /* je out */
487
488         pop_callee_regs(&prog, callee_regs_used);
489
490         EMIT1(0x58);                              /* pop rax */
491         if (stack_depth)
492                 EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
493                             round_up(stack_depth, 8));
494
495         /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
496         EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
497               offsetof(struct bpf_prog, bpf_func));
498         EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
499               X86_TAIL_CALL_OFFSET);
500         /*
501          * Now we're ready to jump into the next BPF program
502          * rdi == ctx (1st arg)
503          * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
504          */
505         emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
506
507         /* out: */
508         ctx->tail_call_indirect_label = prog - start;
509         *pprog = prog;
510 }
511
512 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
513                                       u8 **pprog, u8 *ip,
514                                       bool *callee_regs_used, u32 stack_depth,
515                                       struct jit_context *ctx)
516 {
517         int tcc_off = -4 - round_up(stack_depth, 8);
518         u8 *prog = *pprog, *start = *pprog;
519         int offset;
520
521         /*
522          * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
523          *      goto out;
524          */
525         EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
526         EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
527
528         offset = ctx->tail_call_direct_label - (prog + 2 - start);
529         EMIT2(X86_JA, offset);                        /* ja out */
530         EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
531         EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */
532
533         poke->tailcall_bypass = ip + (prog - start);
534         poke->adj_off = X86_TAIL_CALL_OFFSET;
535         poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
536         poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
537
538         emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
539                   poke->tailcall_bypass);
540
541         pop_callee_regs(&prog, callee_regs_used);
542         EMIT1(0x58);                                  /* pop rax */
543         if (stack_depth)
544                 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
545
546         memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
547         prog += X86_PATCH_SIZE;
548
549         /* out: */
550         ctx->tail_call_direct_label = prog - start;
551
552         *pprog = prog;
553 }
554
555 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
556 {
557         struct bpf_jit_poke_descriptor *poke;
558         struct bpf_array *array;
559         struct bpf_prog *target;
560         int i, ret;
561
562         for (i = 0; i < prog->aux->size_poke_tab; i++) {
563                 poke = &prog->aux->poke_tab[i];
564                 if (poke->aux && poke->aux != prog->aux)
565                         continue;
566
567                 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
568
569                 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
570                         continue;
571
572                 array = container_of(poke->tail_call.map, struct bpf_array, map);
573                 mutex_lock(&array->aux->poke_mutex);
574                 target = array->ptrs[poke->tail_call.key];
575                 if (target) {
576                         /* Plain memcpy is used when the image is not live yet
577                          * and still not locked as read-only. Once the poke
578                          * location is active (poke->tailcall_target_stable),
579                          * any parallel bpf_arch_text_poke() might still occur
580                          * on the read-write image until we finally lock it
581                          * as read-only. Both modifications on
582                          * the given image are under text_mutex to avoid
583                          * interference.
584                          */
585                         ret = __bpf_arch_text_poke(poke->tailcall_target,
586                                                    BPF_MOD_JUMP, NULL,
587                                                    (u8 *)target->bpf_func +
588                                                    poke->adj_off, false);
589                         BUG_ON(ret < 0);
590                         ret = __bpf_arch_text_poke(poke->tailcall_bypass,
591                                                    BPF_MOD_JUMP,
592                                                    (u8 *)poke->tailcall_target +
593                                                    X86_PATCH_SIZE, NULL, false);
594                         BUG_ON(ret < 0);
595                 }
596                 WRITE_ONCE(poke->tailcall_target_stable, true);
597                 mutex_unlock(&array->aux->poke_mutex);
598         }
599 }
600
601 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
602                            u32 dst_reg, const u32 imm32)
603 {
604         u8 *prog = *pprog;
605         u8 b1, b2, b3;
606
607         /*
608          * Optimization: if imm32 is positive, use 'mov %eax, imm32'
609          * (which zero-extends imm32) to save 2 bytes.
610          */
611         if (sign_propagate && (s32)imm32 < 0) {
612                 /* 'mov %rax, imm32' sign extends imm32 */
613                 b1 = add_1mod(0x48, dst_reg);
614                 b2 = 0xC7;
615                 b3 = 0xC0;
616                 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
617                 goto done;
618         }
619
620         /*
621          * Optimization: if imm32 is zero, use 'xor %eax, %eax'
622          * to save 3 bytes.
623          */
624         if (imm32 == 0) {
625                 if (is_ereg(dst_reg))
626                         EMIT1(add_2mod(0x40, dst_reg, dst_reg));
627                 b2 = 0x31; /* xor */
628                 b3 = 0xC0;
629                 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
630                 goto done;
631         }
632
633         /* mov %eax, imm32 */
634         if (is_ereg(dst_reg))
635                 EMIT1(add_1mod(0x40, dst_reg));
636         EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
637 done:
638         *pprog = prog;
639 }
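
/*
 * Examples: emit_mov_imm32(&prog, false, BPF_REG_1, 5) emits
 * bf 05 00 00 00, "mov edi, 5", which zero-extends into rdi; with
 * sign_propagate and imm32 == -1 it emits 48 c7 c7 ff ff ff ff,
 * "mov rdi, -1", i.e. the sign-extending form.
 */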
640
641 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
642                            const u32 imm32_hi, const u32 imm32_lo)
643 {
644         u8 *prog = *pprog;
645
646         if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
647                 /*
648                  * For emitting a plain u32, where the sign bit must not be
649                  * propagated, LLVM tends to load imm64 over mov32
650                  * directly, so save a couple of bytes by just doing
651                  * 'mov %eax, imm32' instead.
652                  */
653                 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
654         } else {
655                 /* movabsq %rax, imm64 */
656                 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
657                 EMIT(imm32_lo, 4);
658                 EMIT(imm32_hi, 4);
659         }
660
661         *pprog = prog;
662 }
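
/*
 * I.e. a 64-bit immediate that fits in 32 unsigned bits is emitted as the
 * 5-byte mov32 above (zero extension yields the right 64-bit value);
 * anything else takes the 10-byte movabs (REX.W + 0xB8+reg + imm64).
 */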
663
664 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
665 {
666         u8 *prog = *pprog;
667
668         if (is64) {
669                 /* mov dst, src */
670                 EMIT_mov(dst_reg, src_reg);
671         } else {
672                 /* mov32 dst, src */
673                 if (is_ereg(dst_reg) || is_ereg(src_reg))
674                         EMIT1(add_2mod(0x40, dst_reg, src_reg));
675                 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
676         }
677
678         *pprog = prog;
679 }
680
681 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
682 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
683 {
684         u8 *prog = *pprog;
685
686         if (is_imm8(off)) {
687                 /* 1-byte signed displacement.
688                  *
689                  * If off == 0 we could skip this and save one extra byte, but
690                  * the special case of x86 R13, which always needs an offset, is
691                  * not worth the hassle.
692                  */
693                 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
694         } else {
695                 /* 4-byte signed displacement */
696                 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
697         }
698         *pprog = prog;
699 }
700
701 /*
702  * Emit a REX byte if it will be necessary to address these registers
703  */
704 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
705 {
706         u8 *prog = *pprog;
707
708         if (is64)
709                 EMIT1(add_2mod(0x48, dst_reg, src_reg));
710         else if (is_ereg(dst_reg) || is_ereg(src_reg))
711                 EMIT1(add_2mod(0x40, dst_reg, src_reg));
712         *pprog = prog;
713 }
714
715 /*
716  * Similar version of maybe_emit_mod() for a single register
717  */
718 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
719 {
720         u8 *prog = *pprog;
721
722         if (is64)
723                 EMIT1(add_1mod(0x48, reg));
724         else if (is_ereg(reg))
725                 EMIT1(add_1mod(0x40, reg));
726         *pprog = prog;
727 }
728
729 /* LDX: dst_reg = *(u8*)(src_reg + off) */
730 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
731 {
732         u8 *prog = *pprog;
733
734         switch (size) {
735         case BPF_B:
736                 /* Emit 'movzx rax, byte ptr [rax + off]' */
737                 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
738                 break;
739         case BPF_H:
740                 /* Emit 'movzx rax, word ptr [rax + off]' */
741                 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
742                 break;
743         case BPF_W:
744                 /* Emit 'mov eax, dword ptr [rax+0x14]' */
745                 if (is_ereg(dst_reg) || is_ereg(src_reg))
746                         EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
747                 else
748                         EMIT1(0x8B);
749                 break;
750         case BPF_DW:
751                 /* Emit 'mov rax, qword ptr [rax+0x14]' */
752                 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
753                 break;
754         }
755         emit_insn_suffix(&prog, src_reg, dst_reg, off);
756         *pprog = prog;
757 }
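
/*
 * Example: size == BPF_W, dst_reg == BPF_REG_0 (rax), src_reg == BPF_REG_1
 * (rdi), off == 0x14 emits 8b 47 14, "mov eax, dword ptr [rdi + 0x14]";
 * the 32-bit load zero-extends into the full rax.
 */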
758
759 /* STX: *(u8*)(dst_reg + off) = src_reg */
760 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
761 {
762         u8 *prog = *pprog;
763
764         switch (size) {
765         case BPF_B:
766                 /* Emit 'mov byte ptr [rax + off], al' */
767                 if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
768                         /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
769                         EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
770                 else
771                         EMIT1(0x88);
772                 break;
773         case BPF_H:
774                 if (is_ereg(dst_reg) || is_ereg(src_reg))
775                         EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
776                 else
777                         EMIT2(0x66, 0x89);
778                 break;
779         case BPF_W:
780                 if (is_ereg(dst_reg) || is_ereg(src_reg))
781                         EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
782                 else
783                         EMIT1(0x89);
784                 break;
785         case BPF_DW:
786                 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
787                 break;
788         }
789         emit_insn_suffix(&prog, dst_reg, src_reg, off);
790         *pprog = prog;
791 }
792
793 static int emit_atomic(u8 **pprog, u8 atomic_op,
794                        u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
795 {
796         u8 *prog = *pprog;
797
798         EMIT1(0xF0); /* lock prefix */
799
800         maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
801
802         /* emit opcode */
803         switch (atomic_op) {
804         case BPF_ADD:
805         case BPF_SUB:
806         case BPF_AND:
807         case BPF_OR:
808         case BPF_XOR:
809                 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
810                 EMIT1(simple_alu_opcodes[atomic_op]);
811                 break;
812         case BPF_ADD | BPF_FETCH:
813                 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
814                 EMIT2(0x0F, 0xC1);
815                 break;
816         case BPF_XCHG:
817                 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
818                 EMIT1(0x87);
819                 break;
820         case BPF_CMPXCHG:
821                 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
822                 EMIT2(0x0F, 0xB1);
823                 break;
824         default:
825                 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
826                 return -EFAULT;
827         }
828
829         emit_insn_suffix(&prog, dst_reg, src_reg, off);
830
831         *pprog = prog;
832         return 0;
833 }
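
/*
 * Example: atomic_op == BPF_ADD, bpf_size == BPF_DW, dst_reg == BPF_REG_1,
 * src_reg == BPF_REG_2, off == 0 emits f0 48 01 77 00,
 * "lock add qword ptr [rdi + 0x0], rsi".
 */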
834
835 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
836 {
837         u32 reg = x->fixup >> 8;
838
839         /* jump over faulting load and clear dest register */
840         *(unsigned long *)((void *)regs + reg) = 0;
841         regs->ip += x->fixup & 0xff;
842         return true;
843 }
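
/*
 * The fixup word is built in do_jit() when JITing BPF_PROBE_MEM loads:
 * bits 0-7 hold the length of the faulting load instruction (added to
 * regs->ip to skip it) and bits 8+ hold the offset of the destination
 * register inside struct pt_regs (taken from reg2pt_regs[]), which is
 * what gets zeroed here.
 */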
844
845 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
846                              bool *regs_used, bool *tail_call_seen)
847 {
848         int i;
849
850         for (i = 1; i <= insn_cnt; i++, insn++) {
851                 if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
852                         *tail_call_seen = true;
853                 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
854                         regs_used[0] = true;
855                 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
856                         regs_used[1] = true;
857                 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
858                         regs_used[2] = true;
859                 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
860                         regs_used[3] = true;
861         }
862 }
863
864 static void emit_nops(u8 **pprog, int len)
865 {
866         u8 *prog = *pprog;
867         int i, noplen;
868
869         while (len > 0) {
870                 noplen = len;
871
872                 if (noplen > ASM_NOP_MAX)
873                         noplen = ASM_NOP_MAX;
874
875                 for (i = 0; i < noplen; i++)
876                         EMIT1(x86_nops[noplen][i]);
877                 len -= noplen;
878         }
879
880         *pprog = prog;
881 }
882
883 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
884
885 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
886                   int oldproglen, struct jit_context *ctx, bool jmp_padding)
887 {
888         bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
889         struct bpf_insn *insn = bpf_prog->insnsi;
890         bool callee_regs_used[4] = {};
891         int insn_cnt = bpf_prog->len;
892         bool tail_call_seen = false;
893         bool seen_exit = false;
894         u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
895         int i, excnt = 0;
896         int ilen, proglen = 0;
897         u8 *prog = temp;
898         int err;
899
900         detect_reg_usage(insn, insn_cnt, callee_regs_used,
901                          &tail_call_seen);
902
903         /* tail call's presence in current prog implies it is reachable */
904         tail_call_reachable |= tail_call_seen;
905
906         emit_prologue(&prog, bpf_prog->aux->stack_depth,
907                       bpf_prog_was_classic(bpf_prog), tail_call_reachable,
908                       bpf_prog->aux->func_idx != 0);
909         push_callee_regs(&prog, callee_regs_used);
910
911         ilen = prog - temp;
912         if (image)
913                 memcpy(image + proglen, temp, ilen);
914         proglen += ilen;
915         addrs[0] = proglen;
916         prog = temp;
917
918         for (i = 1; i <= insn_cnt; i++, insn++) {
919                 const s32 imm32 = insn->imm;
920                 u32 dst_reg = insn->dst_reg;
921                 u32 src_reg = insn->src_reg;
922                 u8 b2 = 0, b3 = 0;
923                 u8 *start_of_ldx;
924                 s64 jmp_offset;
925                 u8 jmp_cond;
926                 u8 *func;
927                 int nops;
928
929                 switch (insn->code) {
930                         /* ALU */
931                 case BPF_ALU | BPF_ADD | BPF_X:
932                 case BPF_ALU | BPF_SUB | BPF_X:
933                 case BPF_ALU | BPF_AND | BPF_X:
934                 case BPF_ALU | BPF_OR | BPF_X:
935                 case BPF_ALU | BPF_XOR | BPF_X:
936                 case BPF_ALU64 | BPF_ADD | BPF_X:
937                 case BPF_ALU64 | BPF_SUB | BPF_X:
938                 case BPF_ALU64 | BPF_AND | BPF_X:
939                 case BPF_ALU64 | BPF_OR | BPF_X:
940                 case BPF_ALU64 | BPF_XOR | BPF_X:
941                         maybe_emit_mod(&prog, dst_reg, src_reg,
942                                        BPF_CLASS(insn->code) == BPF_ALU64);
943                         b2 = simple_alu_opcodes[BPF_OP(insn->code)];
944                         EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
945                         break;
946
947                 case BPF_ALU64 | BPF_MOV | BPF_X:
948                 case BPF_ALU | BPF_MOV | BPF_X:
949                         emit_mov_reg(&prog,
950                                      BPF_CLASS(insn->code) == BPF_ALU64,
951                                      dst_reg, src_reg);
952                         break;
953
954                         /* neg dst */
955                 case BPF_ALU | BPF_NEG:
956                 case BPF_ALU64 | BPF_NEG:
957                         maybe_emit_1mod(&prog, dst_reg,
958                                         BPF_CLASS(insn->code) == BPF_ALU64);
959                         EMIT2(0xF7, add_1reg(0xD8, dst_reg));
960                         break;
961
962                 case BPF_ALU | BPF_ADD | BPF_K:
963                 case BPF_ALU | BPF_SUB | BPF_K:
964                 case BPF_ALU | BPF_AND | BPF_K:
965                 case BPF_ALU | BPF_OR | BPF_K:
966                 case BPF_ALU | BPF_XOR | BPF_K:
967                 case BPF_ALU64 | BPF_ADD | BPF_K:
968                 case BPF_ALU64 | BPF_SUB | BPF_K:
969                 case BPF_ALU64 | BPF_AND | BPF_K:
970                 case BPF_ALU64 | BPF_OR | BPF_K:
971                 case BPF_ALU64 | BPF_XOR | BPF_K:
972                         maybe_emit_1mod(&prog, dst_reg,
973                                         BPF_CLASS(insn->code) == BPF_ALU64);
974
975                         /*
976                          * b3 holds the 'normal' opcode; b2 is the short form,
977                          * only valid when dst is eax/rax.
978                          */
979                         switch (BPF_OP(insn->code)) {
980                         case BPF_ADD:
981                                 b3 = 0xC0;
982                                 b2 = 0x05;
983                                 break;
984                         case BPF_SUB:
985                                 b3 = 0xE8;
986                                 b2 = 0x2D;
987                                 break;
988                         case BPF_AND:
989                                 b3 = 0xE0;
990                                 b2 = 0x25;
991                                 break;
992                         case BPF_OR:
993                                 b3 = 0xC8;
994                                 b2 = 0x0D;
995                                 break;
996                         case BPF_XOR:
997                                 b3 = 0xF0;
998                                 b2 = 0x35;
999                                 break;
1000                         }
1001
1002                         if (is_imm8(imm32))
1003                                 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1004                         else if (is_axreg(dst_reg))
1005                                 EMIT1_off32(b2, imm32);
1006                         else
1007                                 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1008                         break;
1009
1010                 case BPF_ALU64 | BPF_MOV | BPF_K:
1011                 case BPF_ALU | BPF_MOV | BPF_K:
1012                         emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1013                                        dst_reg, imm32);
1014                         break;
1015
1016                 case BPF_LD | BPF_IMM | BPF_DW:
1017                         emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1018                         insn++;
1019                         i++;
1020                         break;
1021
1022                         /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1023                 case BPF_ALU | BPF_MOD | BPF_X:
1024                 case BPF_ALU | BPF_DIV | BPF_X:
1025                 case BPF_ALU | BPF_MOD | BPF_K:
1026                 case BPF_ALU | BPF_DIV | BPF_K:
1027                 case BPF_ALU64 | BPF_MOD | BPF_X:
1028                 case BPF_ALU64 | BPF_DIV | BPF_X:
1029                 case BPF_ALU64 | BPF_MOD | BPF_K:
1030                 case BPF_ALU64 | BPF_DIV | BPF_K:
1031                         EMIT1(0x50); /* push rax */
1032                         EMIT1(0x52); /* push rdx */
1033
1034                         if (BPF_SRC(insn->code) == BPF_X)
1035                                 /* mov r11, src_reg */
1036                                 EMIT_mov(AUX_REG, src_reg);
1037                         else
1038                                 /* mov r11, imm32 */
1039                                 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1040
1041                         /* mov rax, dst_reg */
1042                         EMIT_mov(BPF_REG_0, dst_reg);
1043
1044                         /*
1045                          * xor edx, edx
1046                          * equivalent to 'xor rdx, rdx', but one byte less
1047                          */
1048                         EMIT2(0x31, 0xd2);
1049
1050                         if (BPF_CLASS(insn->code) == BPF_ALU64)
1051                                 /* div r11 */
1052                                 EMIT3(0x49, 0xF7, 0xF3);
1053                         else
1054                                 /* div r11d */
1055                                 EMIT3(0x41, 0xF7, 0xF3);
1056
1057                         if (BPF_OP(insn->code) == BPF_MOD)
1058                                 /* mov r11, rdx */
1059                                 EMIT3(0x49, 0x89, 0xD3);
1060                         else
1061                                 /* mov r11, rax */
1062                                 EMIT3(0x49, 0x89, 0xC3);
1063
1064                         EMIT1(0x5A); /* pop rdx */
1065                         EMIT1(0x58); /* pop rax */
1066
1067                         /* mov dst_reg, r11 */
1068                         EMIT_mov(dst_reg, AUX_REG);
1069                         break;
1070
1071                 case BPF_ALU | BPF_MUL | BPF_K:
1072                 case BPF_ALU | BPF_MUL | BPF_X:
1073                 case BPF_ALU64 | BPF_MUL | BPF_K:
1074                 case BPF_ALU64 | BPF_MUL | BPF_X:
1075                 {
1076                         bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1077
1078                         if (dst_reg != BPF_REG_0)
1079                                 EMIT1(0x50); /* push rax */
1080                         if (dst_reg != BPF_REG_3)
1081                                 EMIT1(0x52); /* push rdx */
1082
1083                         /* mov r11, dst_reg */
1084                         EMIT_mov(AUX_REG, dst_reg);
1085
1086                         if (BPF_SRC(insn->code) == BPF_X)
1087                                 emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
1088                         else
1089                                 emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
1090
1091                         if (is64)
1092                                 EMIT1(add_1mod(0x48, AUX_REG));
1093                         else if (is_ereg(AUX_REG))
1094                                 EMIT1(add_1mod(0x40, AUX_REG));
1095                         /* mul(q) r11 */
1096                         EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
1097
1098                         if (dst_reg != BPF_REG_3)
1099                                 EMIT1(0x5A); /* pop rdx */
1100                         if (dst_reg != BPF_REG_0) {
1101                                 /* mov dst_reg, rax */
1102                                 EMIT_mov(dst_reg, BPF_REG_0);
1103                                 EMIT1(0x58); /* pop rax */
1104                         }
1105                         break;
1106                 }
1107                         /* Shifts */
1108                 case BPF_ALU | BPF_LSH | BPF_K:
1109                 case BPF_ALU | BPF_RSH | BPF_K:
1110                 case BPF_ALU | BPF_ARSH | BPF_K:
1111                 case BPF_ALU64 | BPF_LSH | BPF_K:
1112                 case BPF_ALU64 | BPF_RSH | BPF_K:
1113                 case BPF_ALU64 | BPF_ARSH | BPF_K:
1114                         maybe_emit_1mod(&prog, dst_reg,
1115                                         BPF_CLASS(insn->code) == BPF_ALU64);
1116
1117                         b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1118                         if (imm32 == 1)
1119                                 EMIT2(0xD1, add_1reg(b3, dst_reg));
1120                         else
1121                                 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1122                         break;
1123
1124                 case BPF_ALU | BPF_LSH | BPF_X:
1125                 case BPF_ALU | BPF_RSH | BPF_X:
1126                 case BPF_ALU | BPF_ARSH | BPF_X:
1127                 case BPF_ALU64 | BPF_LSH | BPF_X:
1128                 case BPF_ALU64 | BPF_RSH | BPF_X:
1129                 case BPF_ALU64 | BPF_ARSH | BPF_X:
1130
1131                         /* Check for bad case when dst_reg == rcx */
1132                         if (dst_reg == BPF_REG_4) {
1133                                 /* mov r11, dst_reg */
1134                                 EMIT_mov(AUX_REG, dst_reg);
1135                                 dst_reg = AUX_REG;
1136                         }
1137
1138                         if (src_reg != BPF_REG_4) { /* common case */
1139                                 EMIT1(0x51); /* push rcx */
1140
1141                                 /* mov rcx, src_reg */
1142                                 EMIT_mov(BPF_REG_4, src_reg);
1143                         }
1144
1145                         /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1146                         maybe_emit_1mod(&prog, dst_reg,
1147                                         BPF_CLASS(insn->code) == BPF_ALU64);
1148
1149                         b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1150                         EMIT2(0xD3, add_1reg(b3, dst_reg));
1151
1152                         if (src_reg != BPF_REG_4)
1153                                 EMIT1(0x59); /* pop rcx */
1154
1155                         if (insn->dst_reg == BPF_REG_4)
1156                                 /* mov dst_reg, r11 */
1157                                 EMIT_mov(insn->dst_reg, AUX_REG);
1158                         break;
1159
1160                 case BPF_ALU | BPF_END | BPF_FROM_BE:
1161                         switch (imm32) {
1162                         case 16:
1163                                 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1164                                 EMIT1(0x66);
1165                                 if (is_ereg(dst_reg))
1166                                         EMIT1(0x41);
1167                                 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1168
1169                                 /* Emit 'movzwl eax, ax' */
1170                                 if (is_ereg(dst_reg))
1171                                         EMIT3(0x45, 0x0F, 0xB7);
1172                                 else
1173                                         EMIT2(0x0F, 0xB7);
1174                                 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1175                                 break;
1176                         case 32:
1177                                 /* Emit 'bswap eax' to swap lower 4 bytes */
1178                                 if (is_ereg(dst_reg))
1179                                         EMIT2(0x41, 0x0F);
1180                                 else
1181                                         EMIT1(0x0F);
1182                                 EMIT1(add_1reg(0xC8, dst_reg));
1183                                 break;
1184                         case 64:
1185                                 /* Emit 'bswap rax' to swap 8 bytes */
1186                                 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1187                                       add_1reg(0xC8, dst_reg));
1188                                 break;
1189                         }
1190                         break;
1191
1192                 case BPF_ALU | BPF_END | BPF_FROM_LE:
1193                         switch (imm32) {
1194                         case 16:
1195                                 /*
1196                                  * Emit 'movzwl eax, ax' to zero extend 16-bit
1197                                  * into 64 bit
1198                                  */
1199                                 if (is_ereg(dst_reg))
1200                                         EMIT3(0x45, 0x0F, 0xB7);
1201                                 else
1202                                         EMIT2(0x0F, 0xB7);
1203                                 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1204                                 break;
1205                         case 32:
1206                                 /* Emit 'mov eax, eax' to clear upper 32-bits */
1207                                 if (is_ereg(dst_reg))
1208                                         EMIT1(0x45);
1209                                 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1210                                 break;
1211                         case 64:
1212                                 /* nop */
1213                                 break;
1214                         }
1215                         break;
1216
1217                         /* speculation barrier */
1218                 case BPF_ST | BPF_NOSPEC:
1219                         if (boot_cpu_has(X86_FEATURE_XMM2))
1220                                 EMIT_LFENCE();
1221                         break;
1222
1223                         /* ST: *(u8*)(dst_reg + off) = imm */
1224                 case BPF_ST | BPF_MEM | BPF_B:
1225                         if (is_ereg(dst_reg))
1226                                 EMIT2(0x41, 0xC6);
1227                         else
1228                                 EMIT1(0xC6);
1229                         goto st;
1230                 case BPF_ST | BPF_MEM | BPF_H:
1231                         if (is_ereg(dst_reg))
1232                                 EMIT3(0x66, 0x41, 0xC7);
1233                         else
1234                                 EMIT2(0x66, 0xC7);
1235                         goto st;
1236                 case BPF_ST | BPF_MEM | BPF_W:
1237                         if (is_ereg(dst_reg))
1238                                 EMIT2(0x41, 0xC7);
1239                         else
1240                                 EMIT1(0xC7);
1241                         goto st;
1242                 case BPF_ST | BPF_MEM | BPF_DW:
1243                         EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1244
1245 st:                     if (is_imm8(insn->off))
1246                                 EMIT2(add_1reg(0x40, dst_reg), insn->off);
1247                         else
1248                                 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1249
1250                         EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1251                         break;
1252
1253                         /* STX: *(u8*)(dst_reg + off) = src_reg */
1254                 case BPF_STX | BPF_MEM | BPF_B:
1255                 case BPF_STX | BPF_MEM | BPF_H:
1256                 case BPF_STX | BPF_MEM | BPF_W:
1257                 case BPF_STX | BPF_MEM | BPF_DW:
1258                         emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1259                         break;
1260
1261                         /* LDX: dst_reg = *(u8*)(src_reg + off) */
1262                 case BPF_LDX | BPF_MEM | BPF_B:
1263                 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1264                 case BPF_LDX | BPF_MEM | BPF_H:
1265                 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1266                 case BPF_LDX | BPF_MEM | BPF_W:
1267                 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1268                 case BPF_LDX | BPF_MEM | BPF_DW:
1269                 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1270                         if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1271                                 /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM,
1272                                  * add abs(insn->off) to the limit to make sure that a negative
1273                                  * offset won't be an issue.
1274                                  * insn->off is s16, so it won't affect valid pointers.
1275                                  */
1276                                 u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
1277                                 u8 *end_of_jmp1, *end_of_jmp2;
1278
1279                                 /* Conservatively check that src_reg + insn->off is a kernel address:
1280                                  * 1. src_reg + insn->off >= limit
1281                                  * 2. src_reg + insn->off doesn't become small positive.
1282                                  * Cannot do src_reg + insn->off >= limit in one branch,
1283                                  * since it needs two spare registers, but JIT has only one.
1284                                  */
1285
1286                                 /* movabsq r11, limit */
1287                                 EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
1288                                 EMIT((u32)limit, 4);
1289                                 EMIT(limit >> 32, 4);
1290                                 /* cmp src_reg, r11 */
1291                                 maybe_emit_mod(&prog, src_reg, AUX_REG, true);
1292                                 EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
1293                                 /* if unsigned '<' goto end_of_jmp2 */
1294                                 EMIT2(X86_JB, 0);
1295                                 end_of_jmp1 = prog;
1296
1297                                 /* mov r11, src_reg */
1298                                 emit_mov_reg(&prog, true, AUX_REG, src_reg);
1299                                 /* add r11, insn->off */
1300                                 maybe_emit_1mod(&prog, AUX_REG, true);
1301                                 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
1302                                 /* jmp if not carry to start_of_ldx
1303                                  * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
1304                                  * that has to be rejected.
1305                                  */
1306                                 EMIT2(0x73 /* JNC */, 0);
1307                                 end_of_jmp2 = prog;
1308
1309                                 /* xor dst_reg, dst_reg */
1310                                 emit_mov_imm32(&prog, false, dst_reg, 0);
1311                                 /* jmp byte_after_ldx */
1312                                 EMIT2(0xEB, 0);
1313
1314                                 /* populate jmp_offset for JB above to jump to xor dst_reg */
1315                                 end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
1316                                 /* populate jmp_offset for JNC above to jump to start_of_ldx */
1317                                 start_of_ldx = prog;
1318                                 end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
1319                         }
1320                         emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1321                         if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1322                                 struct exception_table_entry *ex;
1323                                 u8 *_insn = image + proglen + (start_of_ldx - temp);
1324                                 s64 delta;
1325
1326                                 /* populate jmp_offset for JMP above */
1327                                 start_of_ldx[-1] = prog - start_of_ldx;
1328
1329                                 if (!bpf_prog->aux->extable)
1330                                         break;
1331
1332                                 if (excnt >= bpf_prog->aux->num_exentries) {
1333                                         pr_err("ex gen bug\n");
1334                                         return -EFAULT;
1335                                 }
1336                                 ex = &bpf_prog->aux->extable[excnt++];
1337
1338                                 delta = _insn - (u8 *)&ex->insn;
1339                                 if (!is_simm32(delta)) {
1340                                         pr_err("extable->insn doesn't fit into 32-bit\n");
1341                                         return -EFAULT;
1342                                 }
1343                                 ex->insn = delta;
1344
1345                                 ex->type = EX_TYPE_BPF;
1346
1347                                 if (dst_reg > BPF_REG_9) {
1348                                         pr_err("verifier error\n");
1349                                         return -EFAULT;
1350                                 }
1351                                 /*
1352                                  * Compute size of x86 insn and its target dest x86 register.
1353                                  * ex_handler_bpf() will use lower 8 bits to adjust
1354                                  * pt_regs->ip to jump over this x86 instruction
1355                                  * and upper bits to figure out which pt_regs to zero out.
1356                                  * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1357                                  * of 4 bytes will be ignored and rbx will be zero inited.
1358                                  */
1359                                 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
1360                         }
1361                         break;
1362
1363                 case BPF_STX | BPF_ATOMIC | BPF_W:
1364                 case BPF_STX | BPF_ATOMIC | BPF_DW:
1365                         if (insn->imm == (BPF_AND | BPF_FETCH) ||
1366                             insn->imm == (BPF_OR | BPF_FETCH) ||
1367                             insn->imm == (BPF_XOR | BPF_FETCH)) {
1368                                 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1369                                 u32 real_src_reg = src_reg;
1370                                 u32 real_dst_reg = dst_reg;
1371                                 u8 *branch_target;
1372
1373                                 /*
1374                                  * Can't be implemented with a single x86 insn.
1375                                  * Need to do a CMPXCHG loop.
1376                                  */
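                                /* Rough shape of the loop emitted below, with
                                 * the JIT's usual scratch mapping (r10 for
                                 * BPF_REG_AX, r11 for AUX_REG); illustrative:
                                 *
                                 *   mov r10, rax             ; save R0
                                 * retry:
                                 *   mov rax, [dst + off]     ; load old value
                                 *   mov r11, rax
                                 *   <and|or|xor> r11, src    ; compute locally
                                 *   lock cmpxchg [dst + off], r11
                                 *   jne retry                ; lost the race
                                 *   mov src, rax             ; return old value
                                 *   mov rax, r10             ; restore R0
                                 */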
1377
1378                                 /* Will need RAX as a CMPXCHG operand so save R0 */
1379                                 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1380                                 if (src_reg == BPF_REG_0)
1381                                         real_src_reg = BPF_REG_AX;
1382                                 if (dst_reg == BPF_REG_0)
1383                                         real_dst_reg = BPF_REG_AX;
1384
1385                                 branch_target = prog;
1386                                 /* Load old value */
1387                                 emit_ldx(&prog, BPF_SIZE(insn->code),
1388                                          BPF_REG_0, real_dst_reg, insn->off);
1389                                 /*
1390                                  * Perform the (commutative) operation locally,
1391                                  * put the result in the AUX_REG.
1392                                  */
1393                                 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1394                                 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1395                                 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1396                                       add_2reg(0xC0, AUX_REG, real_src_reg));
1397                                 /* Attempt to swap in new value */
1398                                 err = emit_atomic(&prog, BPF_CMPXCHG,
1399                                                   real_dst_reg, AUX_REG,
1400                                                   insn->off,
1401                                                   BPF_SIZE(insn->code));
1402                                 if (WARN_ON(err))
1403                                         return err;
1404                                 /*
1405                                  * ZF tells us whether we won the race. If it's
1406                                  * cleared we need to try again.
1407                                  */
1408                                 EMIT2(X86_JNE, -(prog - branch_target) - 2);
1409                                 /* Return the pre-modification value */
1410                                 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1411                                 /* Restore R0 after clobbering RAX */
1412                                 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1413                                 break;
1414                         }
1415
1416                         err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1417                                           insn->off, BPF_SIZE(insn->code));
1418                         if (err)
1419                                 return err;
1420                         break;
1421
1422                         /* call */
1423                 case BPF_JMP | BPF_CALL:
1424                         func = (u8 *) __bpf_call_base + imm32;
1425                         if (tail_call_reachable) {
1426                                 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
1427                                 EMIT3_off32(0x48, 0x8B, 0x85,
1428                                             -round_up(bpf_prog->aux->stack_depth, 8) - 8);
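                                /* The mov above is 7 bytes long, hence the +7:
                                 * the call displacement must be computed from
                                 * the call insn's own address.
                                 */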
1429                                 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1430                                         return -EINVAL;
1431                         } else {
1432                                 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1433                                         return -EINVAL;
1434                         }
1435                         break;
1436
1437                 case BPF_JMP | BPF_TAIL_CALL:
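                        /* A non-zero imm32 means the verifier resolved the prog
                         * array to a constant at this call site (imm32 - 1
                         * indexes poke_tab), so a patchable direct tail call
                         * can be emitted; otherwise use the indirect form.
                         */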
1438                         if (imm32)
1439                                 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1440                                                           &prog, image + addrs[i - 1],
1441                                                           callee_regs_used,
1442                                                           bpf_prog->aux->stack_depth,
1443                                                           ctx);
1444                         else
1445                                 emit_bpf_tail_call_indirect(&prog,
1446                                                             callee_regs_used,
1447                                                             bpf_prog->aux->stack_depth,
1448                                                             image + addrs[i - 1],
1449                                                             ctx);
1450                         break;
1451
1452                         /* cond jump */
1453                 case BPF_JMP | BPF_JEQ | BPF_X:
1454                 case BPF_JMP | BPF_JNE | BPF_X:
1455                 case BPF_JMP | BPF_JGT | BPF_X:
1456                 case BPF_JMP | BPF_JLT | BPF_X:
1457                 case BPF_JMP | BPF_JGE | BPF_X:
1458                 case BPF_JMP | BPF_JLE | BPF_X:
1459                 case BPF_JMP | BPF_JSGT | BPF_X:
1460                 case BPF_JMP | BPF_JSLT | BPF_X:
1461                 case BPF_JMP | BPF_JSGE | BPF_X:
1462                 case BPF_JMP | BPF_JSLE | BPF_X:
1463                 case BPF_JMP32 | BPF_JEQ | BPF_X:
1464                 case BPF_JMP32 | BPF_JNE | BPF_X:
1465                 case BPF_JMP32 | BPF_JGT | BPF_X:
1466                 case BPF_JMP32 | BPF_JLT | BPF_X:
1467                 case BPF_JMP32 | BPF_JGE | BPF_X:
1468                 case BPF_JMP32 | BPF_JLE | BPF_X:
1469                 case BPF_JMP32 | BPF_JSGT | BPF_X:
1470                 case BPF_JMP32 | BPF_JSLT | BPF_X:
1471                 case BPF_JMP32 | BPF_JSGE | BPF_X:
1472                 case BPF_JMP32 | BPF_JSLE | BPF_X:
1473                         /* cmp dst_reg, src_reg */
1474                         maybe_emit_mod(&prog, dst_reg, src_reg,
1475                                        BPF_CLASS(insn->code) == BPF_JMP);
1476                         EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1477                         goto emit_cond_jmp;
1478
1479                 case BPF_JMP | BPF_JSET | BPF_X:
1480                 case BPF_JMP32 | BPF_JSET | BPF_X:
1481                         /* test dst_reg, src_reg */
1482                         maybe_emit_mod(&prog, dst_reg, src_reg,
1483                                        BPF_CLASS(insn->code) == BPF_JMP);
1484                         EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1485                         goto emit_cond_jmp;
1486
1487                 case BPF_JMP | BPF_JSET | BPF_K:
1488                 case BPF_JMP32 | BPF_JSET | BPF_K:
1489                         /* test dst_reg, imm32 */
1490                         maybe_emit_1mod(&prog, dst_reg,
1491                                         BPF_CLASS(insn->code) == BPF_JMP);
1492                         EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1493                         goto emit_cond_jmp;
1494
1495                 case BPF_JMP | BPF_JEQ | BPF_K:
1496                 case BPF_JMP | BPF_JNE | BPF_K:
1497                 case BPF_JMP | BPF_JGT | BPF_K:
1498                 case BPF_JMP | BPF_JLT | BPF_K:
1499                 case BPF_JMP | BPF_JGE | BPF_K:
1500                 case BPF_JMP | BPF_JLE | BPF_K:
1501                 case BPF_JMP | BPF_JSGT | BPF_K:
1502                 case BPF_JMP | BPF_JSLT | BPF_K:
1503                 case BPF_JMP | BPF_JSGE | BPF_K:
1504                 case BPF_JMP | BPF_JSLE | BPF_K:
1505                 case BPF_JMP32 | BPF_JEQ | BPF_K:
1506                 case BPF_JMP32 | BPF_JNE | BPF_K:
1507                 case BPF_JMP32 | BPF_JGT | BPF_K:
1508                 case BPF_JMP32 | BPF_JLT | BPF_K:
1509                 case BPF_JMP32 | BPF_JGE | BPF_K:
1510                 case BPF_JMP32 | BPF_JLE | BPF_K:
1511                 case BPF_JMP32 | BPF_JSGT | BPF_K:
1512                 case BPF_JMP32 | BPF_JSLT | BPF_K:
1513                 case BPF_JMP32 | BPF_JSGE | BPF_K:
1514                 case BPF_JMP32 | BPF_JSLE | BPF_K:
1515                         /* test dst_reg, dst_reg to save one extra byte */
1516                         if (imm32 == 0) {
1517                                 maybe_emit_mod(&prog, dst_reg, dst_reg,
1518                                                BPF_CLASS(insn->code) == BPF_JMP);
1519                                 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1520                                 goto emit_cond_jmp;
1521                         }
1522
1523                         /* cmp dst_reg, imm8/32 */
1524                         maybe_emit_1mod(&prog, dst_reg,
1525                                         BPF_CLASS(insn->code) == BPF_JMP);
1526
1527                         if (is_imm8(imm32))
1528                                 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1529                         else
1530                                 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1531
1532 emit_cond_jmp:          /* Convert BPF opcode to x86 */
1533                         switch (BPF_OP(insn->code)) {
1534                         case BPF_JEQ:
1535                                 jmp_cond = X86_JE;
1536                                 break;
1537                         case BPF_JSET:
1538                         case BPF_JNE:
1539                                 jmp_cond = X86_JNE;
1540                                 break;
1541                         case BPF_JGT:
1542                                 /* GT is unsigned '>', JA in x86 */
1543                                 jmp_cond = X86_JA;
1544                                 break;
1545                         case BPF_JLT:
1546                                 /* LT is unsigned '<', JB in x86 */
1547                                 jmp_cond = X86_JB;
1548                                 break;
1549                         case BPF_JGE:
1550                                 /* GE is unsigned '>=', JAE in x86 */
1551                                 jmp_cond = X86_JAE;
1552                                 break;
1553                         case BPF_JLE:
1554                                 /* LE is unsigned '<=', JBE in x86 */
1555                                 jmp_cond = X86_JBE;
1556                                 break;
1557                         case BPF_JSGT:
1558                                 /* Signed '>', GT in x86 */
1559                                 jmp_cond = X86_JG;
1560                                 break;
1561                         case BPF_JSLT:
1562                                 /* Signed '<', LT in x86 */
1563                                 jmp_cond = X86_JL;
1564                                 break;
1565                         case BPF_JSGE:
1566                                 /* Signed '>=', GE in x86 */
1567                                 jmp_cond = X86_JGE;
1568                                 break;
1569                         case BPF_JSLE:
1570                                 /* Signed '<=', LE in x86 */
1571                                 jmp_cond = X86_JLE;
1572                                 break;
1573                         default: /* to silence GCC warning */
1574                                 return -EFAULT;
1575                         }
1576                         jmp_offset = addrs[i + insn->off] - addrs[i];
1577                         if (is_imm8(jmp_offset)) {
1578                                 if (jmp_padding) {
1579                                         /* To keep the jmp_offset valid, the extra bytes are
1580                                          * padded before the jump insn, so we subtract the
1581                                          * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
1582                                          *
1583                                          * If the previous pass already emits an imm8
1584                                          * jmp_cond, then this BPF insn won't shrink, so
1585                                          * "nops" is 0.
1586                                          *
1587                                          * On the other hand, if the previous pass emits an
1588                                          * imm32 jmp_cond, the extra 4 bytes(*) are padded to
1589                                          * keep the image from shrinking further.
1590                                          *
1591                                          * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1592                                          *     is 2 bytes, so the size difference is 4 bytes.
1593                                          */
1594                                         nops = INSN_SZ_DIFF - 2;
1595                                         if (nops != 0 && nops != 4) {
1596                                                 pr_err("unexpected jmp_cond padding: %d bytes\n",
1597                                                        nops);
1598                                                 return -EFAULT;
1599                                         }
1600                                         emit_nops(&prog, nops);
1601                                 }
1602                                 EMIT2(jmp_cond, jmp_offset);
1603                         } else if (is_simm32(jmp_offset)) {
1604                                 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1605                         } else {
1606                                 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1607                                 return -EFAULT;
1608                         }
1609
1610                         break;
1611
1612                 case BPF_JMP | BPF_JA:
1613                         if (insn->off == -1)
1614                                 /* -1 jmp instructions will always jump
1615                                  * backwards two bytes. Explicitly handling
1616                                  * this case avoids wasting too many passes
1617                                  * when there are long sequences of replaced
1618                                  * dead code.
1619                                  */
1620                                 jmp_offset = -2;
1621                         else
1622                                 jmp_offset = addrs[i + insn->off] - addrs[i];
1623
1624                         if (!jmp_offset) {
1625                                 /*
1626                                  * If jmp_padding is enabled, the extra nops will
1627                                  * be inserted. Otherwise, optimize out nop jumps.
1628                                  */
1629                                 if (jmp_padding) {
1630                                         /* There are 3 possible conditions.
1631                                          * (1) This BPF_JA is already optimized out in
1632                                          *     the previous run, so there is no need
1633                                          *     to pad any extra byte (0 byte).
1634                                          * (2) The previous pass emits an imm8 jmp,
1635                                          *     so we pad 2 bytes to match the previous
1636                                          *     insn size.
1637                                          * (3) Similarly, the previous pass emits an
1638                                          *     imm32 jmp, and 5 bytes are padded.
1639                                          */
1640                                         nops = INSN_SZ_DIFF;
1641                                         if (nops != 0 && nops != 2 && nops != 5) {
1642                                                 pr_err("unexpected nop jump padding: %d bytes\n",
1643                                                        nops);
1644                                                 return -EFAULT;
1645                                         }
1646                                         emit_nops(&prog, nops);
1647                                 }
1648                                 break;
1649                         }
1650 emit_jmp:
1651                         if (is_imm8(jmp_offset)) {
1652                                 if (jmp_padding) {
1653                                         /* To avoid breaking jmp_offset, the extra bytes
1654                                          * are padded before the actual jmp insn, so
1655                                          * 2 bytes are subtracted from INSN_SZ_DIFF.
1656                                          *
1657                                          * If the previous pass already emits an imm8
1658                                          * jmp, there is nothing to pad (0 byte).
1659                                          *
1660                                          * If it emits an imm32 jmp (5 bytes) previously
1661                                          * and now an imm8 jmp (2 bytes), then we pad
1662                                          * (5 - 2 = 3) bytes to stop the image from
1663                                          * shrinking further.
1664                                          */
1665                                         nops = INSN_SZ_DIFF - 2;
1666                                         if (nops != 0 && nops != 3) {
1667                                                 pr_err("unexpected jump padding: %d bytes\n",
1668                                                        nops);
1669                                                 return -EFAULT;
1670                                         }
1671                                         emit_nops(&prog, nops);
1672                                 }
1673                                 EMIT2(0xEB, jmp_offset);
1674                         } else if (is_simm32(jmp_offset)) {
1675                                 EMIT1_off32(0xE9, jmp_offset);
1676                         } else {
1677                                 pr_err("jmp gen bug %llx\n", jmp_offset);
1678                                 return -EFAULT;
1679                         }
1680                         break;
1681
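                /* Only one epilogue is emitted per pass; every subsequent
                 * BPF_EXIT is turned into a jump to it via cleanup_addr.
                 */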
1682                 case BPF_JMP | BPF_EXIT:
1683                         if (seen_exit) {
1684                                 jmp_offset = ctx->cleanup_addr - addrs[i];
1685                                 goto emit_jmp;
1686                         }
1687                         seen_exit = true;
1688                         /* Update cleanup_addr */
1689                         ctx->cleanup_addr = proglen;
1690                         pop_callee_regs(&prog, callee_regs_used);
1691                         EMIT1(0xC9);         /* leave */
1692                         emit_return(&prog, image + addrs[i - 1] + (prog - temp));
1693                         break;
1694
1695                 default:
1696                         /*
1697                          * By design x86-64 JIT should support all BPF instructions.
1698                          * This error will be seen if a new instruction was added
1699                          * to the interpreter, but not to the JIT, or if there is
1700                          * junk in bpf_prog.
1701                          */
1702                         pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1703                         return -EINVAL;
1704                 }
1705
1706                 ilen = prog - temp;
1707                 if (ilen > BPF_MAX_INSN_SIZE) {
1708                         pr_err("bpf_jit: fatal insn size error\n");
1709                         return -EFAULT;
1710                 }
1711
1712                 if (image) {
1713                         /*
1714                          * When populating the image, assert that:
1715                          *
1716                          *  i) We do not write beyond the allocated space, and
1717                          * ii) addrs[i] did not change from the prior run, in order
1718                          *     to validate assumptions made for computing branch
1719                          *     displacements.
1720                          */
1721                         if (unlikely(proglen + ilen > oldproglen ||
1722                                      proglen + ilen != addrs[i])) {
1723                                 pr_err("bpf_jit: fatal error\n");
1724                                 return -EFAULT;
1725                         }
1726                         memcpy(image + proglen, temp, ilen);
1727                 }
1728                 proglen += ilen;
1729                 addrs[i] = proglen;
1730                 prog = temp;
1731         }
1732
1733         if (image && excnt != bpf_prog->aux->num_exentries) {
1734                 pr_err("extable is not populated\n");
1735                 return -EFAULT;
1736         }
1737         return proglen;
1738 }
1739
1740 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1741                       int stack_size)
1742 {
1743         int i;
1744         /* Store function arguments to stack.
1745          * For a function that accepts two pointers the sequence will be:
1746          * mov QWORD PTR [rbp-0x10],rdi
1747          * mov QWORD PTR [rbp-0x8],rsi
1748          */
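        /* The sixth x86-64 argument register (r9) has no BPF counterpart,
         * hence the X86_REG_R9 alias for i == 5.
         */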
1749         for (i = 0; i < min(nr_args, 6); i++)
1750                 emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1751                          BPF_REG_FP,
1752                          i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1753                          -(stack_size - i * 8));
1754 }
1755
1756 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1757                          int stack_size)
1758 {
1759         int i;
1760
1761         /* Restore function arguments from stack.
1762          * For a function that accepts two pointers the sequence will be:
1763          * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1764          * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1765          */
1766         for (i = 0; i < min(nr_args, 6); i++)
1767                 emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1768                          i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1769                          BPF_REG_FP,
1770                          -(stack_size - i * 8));
1771 }
1772
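/* Rough shape of what invoke_bpf_prog() emits (illustrative):
 *
 *   rdi = progs[i]
 *   rax = __bpf_prog_enter*(prog)     // start time, or 0 to skip the prog
 *   rbx = rax
 *   if (rax == 0) goto skip
 *   rdi = &ctx on stack; call prog->bpf_func (or the interpreter)
 *   [rbp - 8] = rax                   // only if save_ret
 * skip:
 *   rdi = progs[i]; rsi = rbx
 *   call __bpf_prog_exit*(prog, start_time)
 */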
1773 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1774                            struct bpf_prog *p, int stack_size, bool save_ret)
1775 {
1776         u8 *prog = *pprog;
1777         u8 *jmp_insn;
1778
1779         /* arg1: mov rdi, progs[i] */
1780         emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1781         if (emit_call(&prog,
1782                       p->aux->sleepable ? __bpf_prog_enter_sleepable :
1783                       __bpf_prog_enter, prog))
1784                 return -EINVAL;
1785         /* remember prog start time returned by __bpf_prog_enter */
1786         emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1787
1788         /* if (__bpf_prog_enter*(prog) == 0)
1789          *      goto skip_exec_of_prog;
1790          */
1791         EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
1792         /* emit 2 nops that will be replaced with JE insn */
1793         jmp_insn = prog;
1794         emit_nops(&prog, 2);
1795
1796         /* arg1: lea rdi, [rbp - stack_size] */
1797         EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1798         /* arg2: progs[i]->insnsi for interpreter */
1799         if (!p->jited)
1800                 emit_mov_imm64(&prog, BPF_REG_2,
1801                                (long) p->insnsi >> 32,
1802                                (u32) (long) p->insnsi);
1803         /* call JITed bpf program or interpreter */
1804         if (emit_call(&prog, p->bpf_func, prog))
1805                 return -EINVAL;
1806
1807         /*
1808          * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1809          * of the previous call which is then passed on the stack to
1810          * the next BPF program.
1811          *
1812          * BPF_TRAMP_FENTRY trampoline may need to return the return
1813          * value of BPF_PROG_TYPE_STRUCT_OPS prog.
1814          */
1815         if (save_ret)
1816                 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1817
1818         /* replace 2 nops with JE insn, since jmp target is known */
1819         jmp_insn[0] = X86_JE;
1820         jmp_insn[1] = prog - jmp_insn - 2;
1821
1822         /* arg1: mov rdi, progs[i] */
1823         emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1824         /* arg2: mov rsi, rbx <- start time in nsec */
1825         emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1826         if (emit_call(&prog,
1827                       p->aux->sleepable ? __bpf_prog_exit_sleepable :
1828                       __bpf_prog_exit, prog))
1829                 return -EINVAL;
1830
1831         *pprog = prog;
1832         return 0;
1833 }
1834
1835 static void emit_align(u8 **pprog, u32 align)
1836 {
1837         u8 *target, *prog = *pprog;
1838
1839         target = PTR_ALIGN(prog, align);
1840         if (target != prog)
1841                 emit_nops(&prog, target - prog);
1842
1843         *pprog = prog;
1844 }
1845
1846 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1847 {
1848         u8 *prog = *pprog;
1849         s64 offset;
1850
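        /* The displacement is relative to the end of the 6-byte insn:
         * 2 opcode bytes (0x0F, jcc) plus a 4-byte rel32.
         */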
1851         offset = func - (ip + 2 + 4);
1852         if (!is_simm32(offset)) {
1853                 pr_err("Target %p is out of range\n", func);
1854                 return -EINVAL;
1855         }
1856         EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1857         *pprog = prog;
1858         return 0;
1859 }
1860
1861 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1862                       struct bpf_tramp_progs *tp, int stack_size,
1863                       bool save_ret)
1864 {
1865         int i;
1866         u8 *prog = *pprog;
1867
1868         for (i = 0; i < tp->nr_progs; i++) {
1869                 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
1870                                     save_ret))
1871                         return -EINVAL;
1872         }
1873         *pprog = prog;
1874         return 0;
1875 }
1876
1877 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1878                               struct bpf_tramp_progs *tp, int stack_size,
1879                               u8 **branches)
1880 {
1881         u8 *prog = *pprog;
1882         int i;
1883
1884         /* The first fmod_ret program will receive a garbage return value.
1885          * Set this to 0 to avoid confusing the program.
1886          */
1887         emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1888         emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1889         for (i = 0; i < tp->nr_progs; i++) {
1890                 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
1891                         return -EINVAL;
1892
1893                 /* mod_ret prog stored return value into [rbp - 8]. Emit:
1894                  * if (*(u64 *)(rbp - 8) != 0)
1895                  *      goto do_fexit;
1896                  */
1897                 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
1898                 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1899
1900                 /* Save the location of the branch and generate 6 nops
1901                  * (4 bytes for an offset and 2 bytes for the jump). These nops
1902                  * are replaced with a conditional jump once do_fexit (i.e. the
1903                  * start of the fexit invocation) is finalized.
1904                  */
1905                 branches[i] = prog;
1906                 emit_nops(&prog, 4 + 2);
1907         }
1908
1909         *pprog = prog;
1910         return 0;
1911 }
1912
1913 static bool is_valid_bpf_tramp_flags(unsigned int flags)
1914 {
1915         if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1916             (flags & BPF_TRAMP_F_SKIP_FRAME))
1917                 return false;
1918
1919         /*
1920          * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
1921          * and it must be used alone.
1922          */
1923         if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
1924             (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
1925                 return false;
1926
1927         return true;
1928 }
1929
1930 /* Example:
1931  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1932  * its 'struct btf_func_model' will have nr_args=2
1933  * The assembly code when eth_type_trans is executing after trampoline:
1934  *
1935  * push rbp
1936  * mov rbp, rsp
1937  * sub rsp, 16                     // space for skb and dev
1938  * push rbx                        // temp regs to pass start time
1939  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
1940  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
1941  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1942  * mov rbx, rax                    // remember start time if bpf stats are enabled
1943  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
1944  * call addr_of_jited_FENTRY_prog
1945  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1946  * mov rsi, rbx                    // prog start time
1947  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1948  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
1949  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
1950  * pop rbx
1951  * leave
1952  * ret
1953  *
1954  * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
1955  * replaced with 'call generated_bpf_trampoline'. When it returns
1956  * eth_type_trans will continue executing with original skb and dev pointers.
1957  *
1958  * The assembly code when eth_type_trans is called from trampoline:
1959  *
1960  * push rbp
1961  * mov rbp, rsp
1962  * sub rsp, 24                     // space for skb, dev, return value
1963  * push rbx                        // temp regs to pass start time
1964  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
1965  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
1966  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1967  * mov rbx, rax                    // remember start time if bpf stats are enabled
1968  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1969  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
1970  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1971  * mov rsi, rbx                    // prog start time
1972  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1973  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
1974  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
1975  * call eth_type_trans+5           // execute body of eth_type_trans
1976  * mov qword ptr [rbp - 8], rax    // save return value
1977  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1978  * mov rbx, rax                    // remember start time if bpf stats are enabled
1979  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1980  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
1981  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1982  * mov rsi, rbx                    // prog start time
1983  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1984  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
1985  * pop rbx
1986  * leave
1987  * add rsp, 8                      // skip eth_type_trans's frame
1988  * ret                             // return to its caller
1989  */
1990 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
1991                                 const struct btf_func_model *m, u32 flags,
1992                                 struct bpf_tramp_progs *tprogs,
1993                                 void *orig_call)
1994 {
1995         int ret, i, nr_args = m->nr_args;
1996         int stack_size = nr_args * 8;
1997         struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
1998         struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
1999         struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
2000         u8 **branches = NULL;
2001         u8 *prog;
2002         bool save_ret;
2003
2004         /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
2005         if (nr_args > 6)
2006                 return -ENOTSUPP;
2007
2008         if (!is_valid_bpf_tramp_flags(flags))
2009                 return -EINVAL;
2010
2011         /* room for return value of orig_call or fentry prog */
2012         save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
2013         if (save_ret)
2014                 stack_size += 8;
2015
2016         if (flags & BPF_TRAMP_F_IP_ARG)
2017                 stack_size += 8; /* room for IP address argument */
2018
2019         if (flags & BPF_TRAMP_F_SKIP_FRAME)
2020                 /* skip patched call instruction and point orig_call to actual
2021                  * body of the kernel function.
2022                  */
2023                 orig_call += X86_PATCH_SIZE;
2024
2025         prog = image;
2026
2027         EMIT1(0x55);             /* push rbp */
2028         EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
2029         EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
2030         EMIT1(0x53);             /* push rbx */
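        /* Rough stack layout below this point (illustrative: nr_args == 2,
         * save_ret set, no BPF_TRAMP_F_IP_ARG), matching the example above:
         *   [rbp -  8]  return value slot
         *   [rbp - 16]  arg2 (rsi)
         *   [rbp - 24]  arg1 (rdi)
         * with the saved rbx sitting just below, from the push above.
         */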
2031
2032         if (flags & BPF_TRAMP_F_IP_ARG) {
2033                 /* Store IP address of the traced function:
2034                  * mov rax, QWORD PTR [rbp + 8]
2035                  * sub rax, X86_PATCH_SIZE
2036                  * mov QWORD PTR [rbp - stack_size], rax
2037                  */
2038                 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
2039                 EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
2040                 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -stack_size);
2041
2042                 /* Continue with stack_size for regs storage; the stack
2043                  * will be correctly restored by the 'leave' instruction.
2044                  */
2045                 stack_size -= 8;
2046         }
2047
2048         save_regs(m, &prog, nr_args, stack_size);
2049
2050         if (flags & BPF_TRAMP_F_CALL_ORIG) {
2051                 /* arg1: mov rdi, im */
2052                 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2053                 if (emit_call(&prog, __bpf_tramp_enter, prog)) {
2054                         ret = -EINVAL;
2055                         goto cleanup;
2056                 }
2057         }
2058
2059         if (fentry->nr_progs)
2060                 if (invoke_bpf(m, &prog, fentry, stack_size,
2061                                flags & BPF_TRAMP_F_RET_FENTRY_RET))
2062                         return -EINVAL;
2063
2064         if (fmod_ret->nr_progs) {
2065                 branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
2066                                    GFP_KERNEL);
2067                 if (!branches)
2068                         return -ENOMEM;
2069
2070                 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
2071                                        branches)) {
2072                         ret = -EINVAL;
2073                         goto cleanup;
2074                 }
2075         }
2076
2077         if (flags & BPF_TRAMP_F_CALL_ORIG) {
2078                 restore_regs(m, &prog, nr_args, stack_size);
2079
2080                 /* call original function */
2081                 if (emit_call(&prog, orig_call, prog)) {
2082                         ret = -EINVAL;
2083                         goto cleanup;
2084                 }
2085                 /* remember return value in a stack for bpf prog to access */
2086                 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2087                 im->ip_after_call = prog;
2088                 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
2089                 prog += X86_PATCH_SIZE;
2090         }
2091
2092         if (fmod_ret->nr_progs) {
2093                 /* From Intel 64 and IA-32 Architectures Optimization
2094                  * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2095                  * Coding Rule 11: All branch targets should be 16-byte
2096                  * aligned.
2097                  */
2098                 emit_align(&prog, 16);
2099                 /* Update the branches saved in invoke_bpf_mod_ret with the
2100                  * aligned address of do_fexit.
2101                  */
2102                 for (i = 0; i < fmod_ret->nr_progs; i++)
2103                         emit_cond_near_jump(&branches[i], prog, branches[i],
2104                                             X86_JNE);
2105         }
2106
2107         if (fexit->nr_progs)
2108                 if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
2109                         ret = -EINVAL;
2110                         goto cleanup;
2111                 }
2112
2113         if (flags & BPF_TRAMP_F_RESTORE_REGS)
2114                 restore_regs(m, &prog, nr_args, stack_size);
2115
2116         /* This needs to be done regardless. If there were fmod_ret programs,
2117          * the return value is only updated on the stack and still needs to be
2118          * restored to R0.
2119          */
2120         if (flags & BPF_TRAMP_F_CALL_ORIG) {
2121                 im->ip_epilogue = prog;
2122                 /* arg1: mov rdi, im */
2123                 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2124                 if (emit_call(&prog, __bpf_tramp_exit, prog)) {
2125                         ret = -EINVAL;
2126                         goto cleanup;
2127                 }
2128         }
2129         /* restore return value of orig_call or fentry prog back into RAX */
2130         if (save_ret)
2131                 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2132
2133         EMIT1(0x5B); /* pop rbx */
2134         EMIT1(0xC9); /* leave */
2135         if (flags & BPF_TRAMP_F_SKIP_FRAME)
2136                 /* skip our return address and return to parent */
2137                 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2138         emit_return(&prog, prog);
2139         /* Make sure the trampoline generation logic doesn't overflow */
2140         if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2141                 ret = -EFAULT;
2142                 goto cleanup;
2143         }
2144         ret = prog - (u8 *)image;
2145
2146 cleanup:
2147         kfree(branches);
2148         return ret;
2149 }
2150
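/* Emit a balanced compare/branch tree over progs[a..b], sorted via cmp_ips()
 * in arch_prepare_bpf_dispatcher() below. Sketch of one inner node and one
 * leaf (illustrative):
 *
 *   cmp rdx, progs[a + pivot]
 *   jg  upper_half
 *   ... lower half ...
 *
 *   cmp rdx, progs[leaf]
 *   je  progs[leaf]
 *   jmp rdx                  ; fall back to an indirect jump
 */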
2151 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
2152 {
2153         u8 *jg_reloc, *prog = *pprog;
2154         int pivot, err, jg_bytes = 1;
2155         s64 jg_offset;
2156
2157         if (a == b) {
2158                 /* Leaf node of recursion, i.e. not a range of indices
2159                  * anymore.
2160                  */
2161                 EMIT1(add_1mod(0x48, BPF_REG_3));       /* cmp rdx,func */
2162                 if (!is_simm32(progs[a]))
2163                         return -1;
2164                 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
2165                             progs[a]);
2166                 err = emit_cond_near_jump(&prog,        /* je func */
2167                                           (void *)progs[a], prog,
2168                                           X86_JE);
2169                 if (err)
2170                         return err;
2171
2172                 emit_indirect_jump(&prog, 2 /* rdx */, prog);
2173
2174                 *pprog = prog;
2175                 return 0;
2176         }
2177
2178         /* Not a leaf node, so we pivot, and recursively descend into
2179          * the lower and upper ranges.
2180          */
2181         pivot = (b - a) / 2;
2182         EMIT1(add_1mod(0x48, BPF_REG_3));               /* cmp rdx,func */
2183         if (!is_simm32(progs[a + pivot]))
2184                 return -1;
2185         EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2186
2187         if (pivot > 2) {                                /* jg upper_part */
2188                 /* Require near jump. */
2189                 jg_bytes = 4;
2190                 EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2191         } else {
2192                 EMIT2(X86_JG, 0);
2193         }
2194         jg_reloc = prog;
2195
2196         err = emit_bpf_dispatcher(&prog, a, a + pivot,  /* emit lower_part */
2197                                   progs);
2198         if (err)
2199                 return err;
2200
2201         /* From Intel 64 and IA-32 Architectures Optimization
2202          * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2203          * Coding Rule 11: All branch targets should be 16-byte
2204          * aligned.
2205          */
2206         emit_align(&prog, 16);
2207         jg_offset = prog - jg_reloc;
2208         emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2209
2210         err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
2211                                   b, progs);
2212         if (err)
2213                 return err;
2214
2215         *pprog = prog;
2216         return 0;
2217 }
2218
2219 static int cmp_ips(const void *a, const void *b)
2220 {
2221         const s64 *ipa = a;
2222         const s64 *ipb = b;
2223
2224         if (*ipa > *ipb)
2225                 return 1;
2226         if (*ipa < *ipb)
2227                 return -1;
2228         return 0;
2229 }
2230
2231 int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
2232 {
2233         u8 *prog = image;
2234
2235         sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2236         return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
2237 }
2238
2239 struct x64_jit_data {
2240         struct bpf_binary_header *header;
2241         int *addrs;
2242         u8 *image;
2243         int proglen;
2244         struct jit_context ctx;
2245 };
2246
2247 #define MAX_PASSES 20
2248 #define PADDING_PASSES (MAX_PASSES - 5)
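/* If the image has not converged after PADDING_PASSES passes, the remaining
 * passes pad would-be-shrinking instructions with NOPs (see the jmp_padding
 * handling in do_jit()), which stops the image from shrinking further and
 * lets it settle within the passes that are left.
 */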
2249
2250 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2251 {
2252         struct bpf_binary_header *header = NULL;
2253         struct bpf_prog *tmp, *orig_prog = prog;
2254         struct x64_jit_data *jit_data;
2255         int proglen, oldproglen = 0;
2256         struct jit_context ctx = {};
2257         bool tmp_blinded = false;
2258         bool extra_pass = false;
2259         bool padding = false;
2260         u8 *image = NULL;
2261         int *addrs;
2262         int pass;
2263         int i;
2264
2265         if (!prog->jit_requested)
2266                 return orig_prog;
2267
2268         tmp = bpf_jit_blind_constants(prog);
2269         /*
2270          * If blinding was requested and we failed during blinding,
2271          * we must fall back to the interpreter.
2272          */
2273         if (IS_ERR(tmp))
2274                 return orig_prog;
2275         if (tmp != prog) {
2276                 tmp_blinded = true;
2277                 prog = tmp;
2278         }
2279
2280         jit_data = prog->aux->jit_data;
2281         if (!jit_data) {
2282                 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2283                 if (!jit_data) {
2284                         prog = orig_prog;
2285                         goto out;
2286                 }
2287                 prog->aux->jit_data = jit_data;
2288         }
2289         addrs = jit_data->addrs;
2290         if (addrs) {
2291                 ctx = jit_data->ctx;
2292                 oldproglen = jit_data->proglen;
2293                 image = jit_data->image;
2294                 header = jit_data->header;
2295                 extra_pass = true;
2296                 padding = true;
2297                 goto skip_init_addrs;
2298         }
2299         addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2300         if (!addrs) {
2301                 prog = orig_prog;
2302                 goto out_addrs;
2303         }
2304
2305         /*
2306          * Before the first pass, make a rough estimate of addrs[]:
2307          * each BPF instruction is translated to less than 64 bytes.
2308          */
2309         for (proglen = 0, i = 0; i <= prog->len; i++) {
2310                 proglen += 64;
2311                 addrs[i] = proglen;
2312         }
2313         ctx.cleanup_addr = proglen;
2314 skip_init_addrs:
2315
2316         /*
2317          * JITed image shrinks with every pass and the loop iterates
2318          * until the image stops shrinking. Very large BPF programs
2319          * may converge on the last pass. In such a case, do one more
2320          * pass to emit the final image.
2321          */
2322         for (pass = 0; pass < MAX_PASSES || image; pass++) {
2323                 if (!padding && pass >= PADDING_PASSES)
2324                         padding = true;
2325                 proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
2326                 if (proglen <= 0) {
2327 out_image:
2328                         image = NULL;
2329                         if (header)
2330                                 bpf_jit_binary_free(header);
2331                         prog = orig_prog;
2332                         goto out_addrs;
2333                 }
2334                 if (image) {
2335                         if (proglen != oldproglen) {
2336                                 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2337                                        proglen, oldproglen);
2338                                 goto out_image;
2339                         }
2340                         break;
2341                 }
2342                 if (proglen == oldproglen) {
2343                         /*
2344                          * The number of entries in extable is the number of BPF_LDX
2345                          * insns that access kernel memory via "pointer to BTF type".
2346                          * The verifier changed their opcode from LDX|MEM|size
2347                          * to LDX|PROBE_MEM|size to make JITing easier.
2348                          */
2349                         u32 align = __alignof__(struct exception_table_entry);
2350                         u32 extable_size = prog->aux->num_exentries *
2351                                 sizeof(struct exception_table_entry);
2352
2353                         /* allocate module memory for x86 insns and extable */
2354                         header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
2355                                                       &image, align, jit_fill_hole);
2356                         if (!header) {
2357                                 prog = orig_prog;
2358                                 goto out_addrs;
2359                         }
2360                         prog->aux->extable = (void *) image + roundup(proglen, align);
2361                 }
2362                 oldproglen = proglen;
2363                 cond_resched();
2364         }
2365
2366         if (bpf_jit_enable > 1)
2367                 bpf_jit_dump(prog->len, proglen, pass + 1, image);
2368
2369         if (image) {
2370                 if (!prog->is_func || extra_pass) {
2371                         bpf_tail_call_direct_fixup(prog);
2372                         bpf_jit_binary_lock_ro(header);
2373                 } else {
2374                         jit_data->addrs = addrs;
2375                         jit_data->ctx = ctx;
2376                         jit_data->proglen = proglen;
2377                         jit_data->image = image;
2378                         jit_data->header = header;
2379                 }
2380                 prog->bpf_func = (void *)image;
2381                 prog->jited = 1;
2382                 prog->jited_len = proglen;
2383         } else {
2384                 prog = orig_prog;
2385         }
2386
2387         if (!image || !prog->is_func || extra_pass) {
2388                 if (image)
2389                         bpf_prog_fill_jited_linfo(prog, addrs + 1);
2390 out_addrs:
2391                 kvfree(addrs);
2392                 kfree(jit_data);
2393                 prog->aux->jit_data = NULL;
2394         }
2395 out:
2396         if (tmp_blinded)
2397                 bpf_jit_prog_release_other(prog, prog == orig_prog ?
2398                                            tmp : orig_prog);
2399         return prog;
2400 }
2401
2402 bool bpf_jit_supports_kfunc_call(void)
2403 {
2404         return true;
2405 }