Basic implementation of stacktrace table
author    Brenden Blanco <bblanco@plumgrid.com>
          Fri, 4 Mar 2016 20:02:10 +0000 (12:02 -0800)
committer Brenden Blanco <bblanco@plumgrid.com>
          Fri, 4 Mar 2016 20:03:50 +0000 (12:03 -0800)
This is the initial implementation of bpf_get_stackid with Python
helpers. It does not yet expose the different ways the helper can be
called (kernel vs. user stacks, fast stack comparison, or stack-id
reuse); the flags argument is currently hard-coded to 0.

Signed-off-by: Brenden Blanco <bblanco@plumgrid.com>
src/cc/export/helpers.h
src/cc/frontends/clang/b_frontend_action.cc
src/python/bcc/table.py
tests/python/CMakeLists.txt
tests/python/test_stackid.py [new file with mode: 0755]
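
Below, for context, is a minimal usage sketch of the new table type. It
assumes only what this commit adds (the BPF_STACK_TRACE macro, the
lookup() rewrite to bpf_get_stackid with flags fixed to 0, and the
Python StackTrace wrapper) plus pre-existing bcc APIs (bcc.BPF,
attach_kprobe, ksym). The probed function ("schedule") and all table
and function names are illustrative, not part of this change.

#!/usr/bin/python
# Sketch: record distinct kernel stacks seen at a kprobe, then print their frames.
import bcc

text = """
#include <uapi/linux/ptrace.h>
#include <linux/bpf.h>

BPF_STACK_TRACE(stack_traces, 1024);
BPF_HASH(seen, int, u64);

int trace(struct pt_regs *ctx) {
    // The clang frontend rewrites this lookup() into (roughly)
    // bpf_get_stackid(stack_traces, ctx, 0); a negative id means failure.
    int stack_id = stack_traces.lookup(ctx);
    if (stack_id < 0)
        return 0;
    u64 one = 1;
    seen.update(&stack_id, &one);
    return 0;
}
"""

b = bcc.BPF(text=text)
b.attach_kprobe(event="schedule", fn_name="trace")  # illustrative probe target

# ... let the probe run for a while, then walk the collected stacks ...
stack_traces = b["stack_traces"]
for stack_id in b["seen"]:
    print("stack id %d:" % stack_id.value)
    # Each leaf holds a u64 data[BPF_MAX_STACK_DEPTH] array; unused slots are zero.
    for addr in stack_traces[stack_id].data:
        if addr == 0:
            break
        print("  %s" % b.ksym(addr))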

diff --git a/src/cc/export/helpers.h b/src/cc/export/helpers.h
index 3d16a02..c359c3e 100644
@@ -25,6 +25,12 @@ R"********(
 #error "CONFIG_BPF_SYSCALL is undefined, please check your .config or ask your Linux distro to enable this feature"
 #endif
 
+#ifdef PERF_MAX_STACK_DEPTH
+#define BPF_MAX_STACK_DEPTH PERF_MAX_STACK_DEPTH
+#else
+#define BPF_MAX_STACK_DEPTH 127
+#endif
+
 /* helper macro to place programs, maps, license in
  * different sections in elf_bpf file. Section names
  * are interpreted by elf_bpf loader
@@ -104,6 +110,16 @@ struct _name##_table_t _name
 #define BPF_HISTOGRAM(...) \
   BPF_HISTX(__VA_ARGS__, BPF_HIST3, BPF_HIST2, BPF_HIST1)(__VA_ARGS__)
 
+#define BPF_STACK_TRACE(_name, _max_entries) \
+struct _name##_table_t { \
+  int key; \
+  struct { u64 data[BPF_MAX_STACK_DEPTH]; } leaf; \
+  int (*lookup) (void *); \
+  struct { u64 data[BPF_MAX_STACK_DEPTH]; } data[_max_entries]; \
+}; \
+__attribute__((section("maps/stacktrace"))) \
+struct _name##_table_t _name
+
 // packet parsing state machine helpers
 #define cursor_advance(_cursor, _len) \
   ({ void *_tmp = _cursor; _cursor += _len; _tmp; })
@@ -170,7 +186,7 @@ static int (*bpf_skb_load_bytes)(void *ctx, int offset, void *to, u32 len) =
   (void *) BPF_FUNC_skb_load_bytes;
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
-static int (*bpf_get_stackid)(void *ctx, void *map) =
+static int (*bpf_get_stackid_)(void *ctx, void *map, u64 flags) =
   (void *) BPF_FUNC_get_stackid;
 static int (*bpf_csum_diff)(void *from, u64 from_size, void *to, u64 to_size, u64 seed) =
   (void *) BPF_FUNC_csum_diff;
@@ -365,6 +381,11 @@ int bpf_map_delete_elem_(uintptr_t map, void *key) {
 }
 
 static inline __attribute__((always_inline))
+int bpf_get_stackid(uintptr_t map, void *ctx, u64 flags) {
+  return bpf_get_stackid_(ctx, (void *)map, flags);
+}
+
+static inline __attribute__((always_inline))
 SEC("helpers")
 int bpf_l3_csum_replace_(void *ctx, u64 off, u64 from, u64 to, u64 flags) {
   switch (flags & 0xf) {
diff --git a/src/cc/frontends/clang/b_frontend_action.cc b/src/cc/frontends/clang/b_frontend_action.cc
index cbd91bd..f7a2500 100644
@@ -354,8 +354,14 @@ bool BTypeVisitor::VisitCallExpr(CallExpr *Call) {
           txt += ", bpf_get_smp_processor_id(), " + args_other + ")";
         } else {
           if (memb_name == "lookup") {
-            prefix = "bpf_map_lookup_elem";
-            suffix = ")";
+            if (table_it->type == BPF_MAP_TYPE_STACK_TRACE) {
+              prefix = "bpf_get_stackid";
+              // TODO: expose the different flags, how?
+              suffix = ", 0)";
+            } else {
+              prefix = "bpf_map_lookup_elem";
+              suffix = ")";
+            }
           } else if (memb_name == "update") {
             prefix = "bpf_map_update_elem";
             suffix = ", " + map_update_policy + ")";
@@ -578,6 +584,8 @@ bool BTypeVisitor::VisitVarDecl(VarDecl *Decl) {
     } else if (A->getName() == "maps/perf_array") {
       if (KERNEL_VERSION(major,minor,0) >= KERNEL_VERSION(4,3,0))
         map_type = BPF_MAP_TYPE_PERF_EVENT_ARRAY;
+    } else if (A->getName() == "maps/stacktrace") {
+      map_type = BPF_MAP_TYPE_STACK_TRACE;
     } else if (A->getName() == "maps/extern") {
       is_extern = true;
       table.fd = SharedTables::instance()->lookup_fd(table.name);
diff --git a/src/python/bcc/table.py b/src/python/bcc/table.py
index 26fa868..c33cb39 100644
@@ -22,6 +22,9 @@ BPF_MAP_TYPE_HASH = 1
 BPF_MAP_TYPE_ARRAY = 2
 BPF_MAP_TYPE_PROG_ARRAY = 3
 BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4
+BPF_MAP_TYPE_PERCPU_HASH = 5
+BPF_MAP_TYPE_PERCPU_ARRAY = 6
+BPF_MAP_TYPE_STACK_TRACE = 7
 
 stars_max = 40
 
@@ -85,6 +88,12 @@ def Table(bpf, map_id, map_fd, keytype, leaftype):
         t = ProgArray(bpf, map_id, map_fd, keytype, leaftype)
     elif ttype == BPF_MAP_TYPE_PERF_EVENT_ARRAY:
         t = PerfEventArray(bpf, map_id, map_fd, keytype, leaftype)
+    elif ttype == BPF_MAP_TYPE_PERCPU_HASH:
+        t = PerCpuHashTable(bpf, map_id, map_fd, keytype, leaftype)
+    elif ttype == BPF_MAP_TYPE_PERCPU_ARRAY:
+        t = PerCpuArray(bpf, map_id, map_fd, keytype, leaftype)
+    elif ttype == BPF_MAP_TYPE_STACK_TRACE:
+        t = StackTrace(bpf, map_id, map_fd, keytype, leaftype)
     if t == None:
         raise Exception("Unknown table type %d" % ttype)
     return t
@@ -393,3 +402,30 @@ class PerfEventArray(ArrayBase):
             lib.perf_reader_free(reader)
             del(self.bpf.open_kprobes()[(id(self), key)])
         del self._cbs[key]
+
+class PerCpuHashTable(TableBase):
+    def __init__(self, *args, **kwargs):
+        raise Exception("Unsupported")
+
+class PerCpuArray(ArrayBase):
+    def __init__(self, *args, **kwargs):
+        raise Exception("Unsupported")
+
+class StackTrace(TableBase):
+    def __init__(self, *args, **kwargs):
+        super(StackTrace, self).__init__(*args, **kwargs)
+
+    def __len__(self):
+        i = 0
+        for k in self: i += 1
+        return i
+
+    def __delitem__(self, key):
+        key_p = ct.pointer(key)
+        res = lib.bpf_delete_elem(self.map_fd, ct.cast(key_p, ct.c_void_p))
+        if res < 0:
+            raise KeyError
+
+    def clear(self):
+        pass
+
diff --git a/tests/python/CMakeLists.txt b/tests/python/CMakeLists.txt
index bfa2e98..04421c9 100644
@@ -48,6 +48,8 @@ add_test(NAME py_array WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
   COMMAND ${TEST_WRAPPER} py_array sudo ${CMAKE_CURRENT_SOURCE_DIR}/test_array.py)
 add_test(NAME py_uprobes WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
   COMMAND ${TEST_WRAPPER} py_uprobes sudo ${CMAKE_CURRENT_SOURCE_DIR}/test_uprobes.py)
+add_test(NAME py_test_stackid WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+  COMMAND ${TEST_WRAPPER} py_stackid sudo ${CMAKE_CURRENT_SOURCE_DIR}/test_stackid.py)
 
 add_test(NAME py_test_dump_func WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
   COMMAND ${TEST_WRAPPER} py_dump_func simple ${CMAKE_CURRENT_SOURCE_DIR}/test_dump_func.py)
diff --git a/tests/python/test_stackid.py b/tests/python/test_stackid.py
new file mode 100755
index 0000000..5cafea5
--- /dev/null
+++ b/tests/python/test_stackid.py
@@ -0,0 +1,37 @@
+#!/usr/bin/python
+# Copyright (c) PLUMgrid, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License")
+
+import bcc
+import unittest
+
+class TestStackid(unittest.TestCase):
+    def test_simple(self):
+        b = bcc.BPF(text="""
+#include <uapi/linux/ptrace.h>
+#include <linux/bpf.h>
+BPF_STACK_TRACE(stack_traces, 10240);
+BPF_HASH(stack_entries, int, int);
+BPF_HASH(stub);
+int kprobe__htab_map_delete_elem(struct pt_regs *ctx, struct bpf_map *map, u64 *k) {
+    int id = stack_traces.lookup(ctx);
+    int key = 1;
+    stack_entries.update(&key, &id);
+    return 0;
+}
+""")
+        stub = b["stub"]
+        stack_traces = b["stack_traces"]
+        stack_entries = b["stack_entries"]
+        try: del stub[stub.Key(1)]
+        except: pass
+        k = stack_entries.Key(1)
+        self.assertIn(k, stack_entries)
+        stackid = stack_entries[k]
+        self.assertIsNotNone(stackid)
+        stack = stack_traces[stackid].data
+        self.assertEqual(b.ksym(stack[0]), "htab_map_delete_elem")
+
+
+if __name__ == "__main__":
+    unittest.main()