bpf: Add bpf_get_func_ip helper for tracing programs
author		Jiri Olsa <jolsa@redhat.com>
		Wed, 14 Jul 2021 09:43:55 +0000 (11:43 +0200)
committer	Alexei Starovoitov <ast@kernel.org>
		Fri, 16 Jul 2021 00:58:41 +0000 (17:58 -0700)
Add the bpf_get_func_ip helper for BPF_PROG_TYPE_TRACING programs,
specifically for all trampoline attach types (fentry, fexit and
fmod_ret).

The IP address of the trampoline's caller (the traced function) is
stored at (ctx - 8), so there is no need to actually call the helper;
instead, the verifier fixes up the call instruction and loads the
[ctx - 8] value directly.
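
For illustration only (not part of this patch), a fentry program could
use the helper roughly as sketched below; the attach target
bpf_fentry_test1, the program name and the libbpf includes are
assumptions of the sketch:

  /* Minimal usage sketch; assumes libbpf's generated helper
   * declarations pick up bpf_get_func_ip from the updated UAPI header.
   */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  SEC("fentry/bpf_fentry_test1")
  int BPF_PROG(trace_func_ip)
  {
  	/* The verifier inlines this call into a load from ctx - 8. */
  	__u64 ip = bpf_get_func_ip(ctx);

  	bpf_printk("traced function entry at 0x%llx", ip);
  	return 0;
  }

  /* bpf_get_func_ip is gpl_only, so a GPL-compatible license is required. */
  char LICENSE[] SEC("license") = "GPL";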

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210714094400.396467-4-jolsa@kernel.org
include/uapi/linux/bpf.h
kernel/bpf/verifier.c
kernel/trace/bpf_trace.c
tools/include/uapi/linux/bpf.h

index 3544ec5234f09cbd7f1782b22a44b531a595b7f9..89688f16ad60c4e488f6cf8844665ff09d1aa83e 100644 (file)
@@ -4841,6 +4841,12 @@ union bpf_attr {
  *             **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
  *             **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
  *             own timer which would have led to a deadlock otherwise.
+ *
+ * u64 bpf_get_func_ip(void *ctx)
+ *     Description
+ *             Get address of the traced function (for tracing programs).
+ *     Return
+ *             Address of the traced function.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -5016,6 +5022,7 @@ union bpf_attr {
        FN(timer_set_callback),         \
        FN(timer_start),                \
        FN(timer_cancel),               \
+       FN(get_func_ip),                \
        /* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
index 344ee67265cc70d72d48ff86bdcc140e945ac419..ceef190514e4199a808ab02f8860c0ae820654f5 100644 (file)
@@ -6161,6 +6161,27 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
        return err;
 }
 
+static int check_get_func_ip(struct bpf_verifier_env *env)
+{
+       enum bpf_attach_type eatype = env->prog->expected_attach_type;
+       enum bpf_prog_type type = resolve_prog_type(env->prog);
+       int func_id = BPF_FUNC_get_func_ip;
+
+       if (type == BPF_PROG_TYPE_TRACING) {
+               if (eatype != BPF_TRACE_FENTRY && eatype != BPF_TRACE_FEXIT &&
+                   eatype != BPF_MODIFY_RETURN) {
+                       verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
+                               func_id_name(func_id), func_id);
+                       return -ENOTSUPP;
+               }
+               return 0;
+       }
+
+       verbose(env, "func %s#%d not supported for program type %d\n",
+               func_id_name(func_id), func_id, type);
+       return -ENOTSUPP;
+}
+
 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
                             int *insn_idx_p)
 {
@@ -6439,6 +6460,12 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
        if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
                env->prog->call_get_stack = true;
 
+       if (func_id == BPF_FUNC_get_func_ip) {
+               if (check_get_func_ip(env))
+                       return -ENOTSUPP;
+               env->prog->call_get_func_ip = true;
+       }
+
        if (changes_data)
                clear_all_pkt_pointers(env);
        return 0;
@@ -12632,6 +12659,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 {
        struct bpf_prog *prog = env->prog;
        bool expect_blinding = bpf_jit_blinding_enabled(prog);
+       enum bpf_prog_type prog_type = resolve_prog_type(prog);
        struct bpf_insn *insn = prog->insnsi;
        const struct bpf_func_proto *fn;
        const int insn_cnt = prog->len;
@@ -12998,6 +13026,21 @@ patch_map_ops_generic:
                        continue;
                }
 
+               /* Implement bpf_get_func_ip inline. */
+               if (prog_type == BPF_PROG_TYPE_TRACING &&
+                   insn->imm == BPF_FUNC_get_func_ip) {
+                       /* Load IP address from ctx - 8 */
+                       insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
+
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
+                       continue;
+               }
+
 patch_call_imm:
                fn = env->ops->get_func_proto(insn->imm, env->prog);
                /* all functions that have prototype and verifier allowed
index 6c77d25137e06c648eedb40288b6513d6baa394e..3e71503eeb23eb887c7b5426711eae6158cfc9e1 100644 (file)
@@ -948,6 +948,19 @@ const struct bpf_func_proto bpf_snprintf_btf_proto = {
        .arg5_type      = ARG_ANYTHING,
 };
 
+BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
+{
+       /* This helper call is inlined by verifier. */
+       return ((u64 *)ctx)[-1];
+}
+
+static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
+       .func           = bpf_get_func_ip_tracing,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+};
+
 const struct bpf_func_proto *
 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -1058,6 +1071,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_for_each_map_elem_proto;
        case BPF_FUNC_snprintf:
                return &bpf_snprintf_proto;
+       case BPF_FUNC_get_func_ip:
+               return &bpf_get_func_ip_proto_tracing;
        default:
                return bpf_base_func_proto(func_id);
        }
index 3544ec5234f09cbd7f1782b22a44b531a595b7f9..89688f16ad60c4e488f6cf8844665ff09d1aa83e 100644 (file)
@@ -4841,6 +4841,12 @@ union bpf_attr {
  *             **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
  *             **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
  *             own timer which would have led to a deadlock otherwise.
+ *
+ * u64 bpf_get_func_ip(void *ctx)
+ *     Description
+ *             Get address of the traced function (for tracing programs).
+ *     Return
+ *             Address of the traced function.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -5016,6 +5022,7 @@ union bpf_attr {
        FN(timer_set_callback),         \
        FN(timer_start),                \
        FN(timer_cancel),               \
+       FN(get_func_ip),                \
        /* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper