Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf...
[platform/kernel/linux-rpi.git] / arch / x86 / net / bpf_jit_comp.c
index e3e2b57..8db6077 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/memory.h>
 #include <linux/sort.h>
 #include <asm/extable.h>
+#include <asm/ftrace.h>
 #include <asm/set_memory.h>
 #include <asm/nospec-branch.h>
 #include <asm/text-patching.h>
@@ -340,6 +341,13 @@ static int emit_call(u8 **pprog, void *func, void *ip)
        return emit_patch(pprog, func, ip, 0xE8);
 }
 
+/*
+ * Emit a direct CALL (opcode 0xE8, same encoding as emit_call() above),
+ * preceded by call-depth accounting for the Return Stack Buffer so that
+ * the call-depth-tracking retbleed mitigation stays balanced.
+ *
+ * NOTE(review): OPTIMIZER_HIDE_VAR(func) presumably keeps the compiler
+ * from constant-folding/specializing on the target address so the
+ * accounting sequence and the patched call site use the same value of
+ * @func — confirm against x86_call_depth_emit_accounting() semantics.
+ */
+static int emit_rsb_call(u8 **pprog, void *func, void *ip)
+{
+       OPTIMIZER_HIDE_VAR(func);
+       x86_call_depth_emit_accounting(pprog, func);
+       return emit_patch(pprog, func, ip, 0xE8);
+}
+
 static int emit_jump(u8 **pprog, void *func, void *ip)
 {
        return emit_patch(pprog, func, ip, 0xE9);
@@ -417,7 +425,10 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
                EMIT2(0xFF, 0xE0 + reg);
        } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
                OPTIMIZER_HIDE_VAR(reg);
-               emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
+               if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
+                       emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
+               else
+                       emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
        } else {
                EMIT2(0xFF, 0xE0 + reg);        /* jmp *%\reg */
                if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
@@ -432,7 +443,7 @@ static void emit_return(u8 **pprog, u8 *ip)
        u8 *prog = *pprog;
 
        if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
-               emit_jump(&prog, &__x86_return_thunk, ip);
+               emit_jump(&prog, x86_return_thunk, ip);
        } else {
                EMIT1(0xC3);            /* ret */
                if (IS_ENABLED(CONFIG_SLS))
@@ -1522,19 +1533,26 @@ st:                     if (is_imm8(insn->off))
                        break;
 
                        /* call */
-               case BPF_JMP | BPF_CALL:
+               case BPF_JMP | BPF_CALL: {
+                       int offs;
+
                        func = (u8 *) __bpf_call_base + imm32;
                        if (tail_call_reachable) {
                                /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
                                EMIT3_off32(0x48, 0x8B, 0x85,
                                            -round_up(bpf_prog->aux->stack_depth, 8) - 8);
-                               if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
+                               if (!imm32)
                                        return -EINVAL;
+                               offs = 7 + x86_call_depth_emit_accounting(&prog, func);
                        } else {
-                               if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
+                               if (!imm32)
                                        return -EINVAL;
+                               offs = x86_call_depth_emit_accounting(&prog, func);
                        }
+                       if (emit_call(&prog, func, image + addrs[i - 1] + offs))
+                               return -EINVAL;
                        break;
+               }
 
                case BPF_JMP | BPF_TAIL_CALL:
                        if (imm32)
@@ -1925,7 +1943,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
        /* arg2: lea rsi, [rbp - ctx_cookie_off] */
        EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
 
-       if (emit_call(&prog, bpf_trampoline_enter(p), prog))
+       if (emit_rsb_call(&prog, bpf_trampoline_enter(p), prog))
                return -EINVAL;
        /* remember prog start time returned by __bpf_prog_enter */
        emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@ -1946,7 +1964,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
                               (long) p->insnsi >> 32,
                               (u32) (long) p->insnsi);
        /* call JITed bpf program or interpreter */
-       if (emit_call(&prog, p->bpf_func, prog))
+       if (emit_rsb_call(&prog, p->bpf_func, prog))
                return -EINVAL;
 
        /*
@@ -1970,7 +1988,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
        emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
        /* arg3: lea rdx, [rbp - run_ctx_off] */
        EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
-       if (emit_call(&prog, bpf_trampoline_exit(p), prog))
+       if (emit_rsb_call(&prog, bpf_trampoline_exit(p), prog))
                return -EINVAL;
 
        *pprog = prog;
@@ -2192,6 +2210,11 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
        prog = image;
 
        EMIT_ENDBR();
+       /*
+        * This is the direct-call trampoline, as such it needs accounting
+        * for the __fentry__ call.
+        */
+       x86_call_depth_emit_accounting(&prog, NULL);
        EMIT1(0x55);             /* push rbp */
        EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
        EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
@@ -2218,7 +2241,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
        if (flags & BPF_TRAMP_F_CALL_ORIG) {
                /* arg1: mov rdi, im */
                emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
-               if (emit_call(&prog, __bpf_tramp_enter, prog)) {
+               if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) {
                        ret = -EINVAL;
                        goto cleanup;
                }
@@ -2250,7 +2273,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
                        EMIT2(0xff, 0xd0); /* call *rax */
                } else {
                        /* call original function */
-                       if (emit_call(&prog, orig_call, prog)) {
+                       if (emit_rsb_call(&prog, orig_call, prog)) {
                                ret = -EINVAL;
                                goto cleanup;
                        }
@@ -2294,7 +2317,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
                im->ip_epilogue = prog;
                /* arg1: mov rdi, im */
                emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
-               if (emit_call(&prog, __bpf_tramp_exit, prog)) {
+               if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) {
                        ret = -EINVAL;
                        goto cleanup;
                }