  }
  bool VisitCallExpr(CallExpr *E) {
    needs_probe_ = false;
+    // bpf_get_current_task() returns a raw kernel pointer; flag the call so
+    // that later dereferences of its result are routed through bpf_probe_read().
+    if (VarDecl *V = dyn_cast_or_null<VarDecl>(E->getCalleeDecl())) {
+      needs_probe_ = V->getName() == "bpf_get_current_task";
+    }
    return false;
  }
  bool VisitParenExpr(ParenExpr *E) {
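With the callee check above, the frontend flags the value returned by
bpf_get_current_task() as an external (kernel) pointer, so later dereferences
of that value are rewritten into bpf_probe_read() calls. The sketch below is
only a hand-written equivalent of that rewrite (not the frontend's literal
output), using the same program as the new test_ext_ptr_from_helper test
further down; test_rewritten is a hypothetical name for the expanded form.

    // What the user writes (needs #include <linux/sched.h>, as in the test):
    int test(struct pt_regs *ctx) {
        struct task_struct *task = (struct task_struct *)bpf_get_current_task();
        return task->prio;
    }

    // Roughly how it behaves after the rewrite: the member access becomes an
    // explicit read of kernel memory. Illustrative only; details differ.
    int test_rewritten(struct pt_regs *ctx) {
        struct task_struct *task = (struct task_struct *)bpf_get_current_task();
        int prio = 0;
        bpf_probe_read(&prio, sizeof(prio), &task->prio);
        return prio;
    }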
from bcc import BPF
import ctypes as ct
-from unittest import main, TestCase
+from unittest import main, skipUnless, TestCase
import os
import sys
from contextlib import contextmanager
+import distutils.version
@contextmanager
def redirect_stderr(to):
sys.stderr.flush()
os.dup2(copied.fileno(), stderr_fd)
+def kernel_version_ge(major, minor):
+    # True if the running kernel version is >= major.minor
+    version = distutils.version.LooseVersion(os.uname()[2]).version
+    if version[0] > major:
+        return True
+    if version[0] < major:
+        return False
+    if minor and version[1] < minor:
+        return False
+    return True
+
class TestClang(TestCase):
    def test_complex(self):
        b = BPF(src_file="test_clang_complex.c", debug=0)
"""
        b = BPF(text=text)
+    @skipUnless(kernel_version_ge(4, 8), "requires kernel >= 4.8")
+    def test_ext_ptr_from_helper(self):
+        text = """
+#include <linux/sched.h>
+int test(struct pt_regs *ctx) {
+    struct task_struct *task = (struct task_struct *)bpf_get_current_task();
+    return task->prio;
+}
+"""
+        b = BPF(text=text)
+        fn = b.load_func("test", BPF.KPROBE)
+
    def test_unary_operator(self):
        text = """
#include <linux/fs.h>
    struct task_struct *task = NULL;
    struct cfs_rq_partial *my_q = NULL;
    task = (struct task_struct *)bpf_get_current_task();
-    bpf_probe_read(&my_q, sizeof(my_q), &task->se.cfs_rq);
-    bpf_probe_read(&len, sizeof(len), &my_q->nr_running);
+    my_q = (struct cfs_rq_partial *)task->se.cfs_rq;
+    len = my_q->nr_running;
    struct data_t data = {.ts = now, .cpu = cpu, .len = len};
    events.perf_submit(ctx, &data, sizeof(data));
    bpf_get_current_comm(event.enter.comm, sizeof(event.enter.comm));
    event.enter.flags = flags;
    task = (struct task_struct *)bpf_get_current_task();
-    bpf_probe_read(&nsproxy, sizeof(nsproxy), &task->nsproxy);
-    bpf_probe_read(&mnt_ns, sizeof(mnt_ns), &nsproxy->mnt_ns);
-    bpf_probe_read(&event.enter.mnt_ns, sizeof(event.enter.mnt_ns),
-                   &mnt_ns->ns.inum);
+    nsproxy = task->nsproxy;
+    mnt_ns = nsproxy->mnt_ns;
+    event.enter.mnt_ns = mnt_ns->ns.inum;
    events.perf_submit(ctx, &event, sizeof(event));
    event.type = EVENT_MOUNT_SOURCE;
    bpf_get_current_comm(event.enter.comm, sizeof(event.enter.comm));
    event.enter.flags = flags;
    task = (struct task_struct *)bpf_get_current_task();
-    bpf_probe_read(&nsproxy, sizeof(nsproxy), &task->nsproxy);
-    bpf_probe_read(&mnt_ns, sizeof(mnt_ns), &nsproxy->mnt_ns);
-    bpf_probe_read(&event.enter.mnt_ns, sizeof(event.enter.mnt_ns),
-                   &mnt_ns->ns.inum);
+    nsproxy = task->nsproxy;
+    mnt_ns = nsproxy->mnt_ns;
+    event.enter.mnt_ns = mnt_ns->ns.inum;
    events.perf_submit(ctx, &event, sizeof(event));
    event.type = EVENT_UMOUNT_TARGET;
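The two mountsnoop hunks above rely on the rewriter following a chain of
external pointers: nsproxy and mnt_ns are assigned from flagged pointers, so
they are tracked as external themselves and each member access in the chain
still goes through a probe read. Illustratively (the variable declarations come
from the surrounding tool code, and the equivalence shown is with the removed
lines, not the frontend's exact output):

    task = (struct task_struct *)bpf_get_current_task();
    // ~ bpf_probe_read(&nsproxy, sizeof(nsproxy), &task->nsproxy);
    nsproxy = task->nsproxy;
    // ~ bpf_probe_read(&mnt_ns, sizeof(mnt_ns), &nsproxy->mnt_ns);
    mnt_ns = nsproxy->mnt_ns;
    // ~ bpf_probe_read(&event.enter.mnt_ns, sizeof(event.enter.mnt_ns), &mnt_ns->ns.inum);
    event.enter.mnt_ns = mnt_ns->ns.inum;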
    // of BPF will support task_rq(p) or something similar as a more reliable
    // interface.
    task = (struct task_struct *)bpf_get_current_task();
-    bpf_probe_read(&my_q, sizeof(my_q), &task->se.cfs_rq);
-    bpf_probe_read(&len, sizeof(len), &my_q->nr_running);
+    my_q = (struct cfs_rq_partial *)task->se.cfs_rq;
+    len = my_q->nr_running;
    // Calculate run queue length by subtracting the currently running task,
    // if present. len 0 == idle, len 1 == one running task.
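A minimal sketch of the adjustment the comment above describes, assuming len
holds the value just read from nr_running (the tool's code around this point is
not part of this excerpt):

    // nr_running also counts the task currently on the CPU, so subtract it to
    // get the number of tasks waiting on the run queue.
    if (len > 0)
        len--;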