def _generate_hash_update(self):
if self.type == "hist":
- return "%s.increment(bpf_log2l(__key));" % \
+ return "%s.atomic_increment(bpf_log2l(__key));" % \
self.probe_hash_name
else:
- return "%s.increment(__key);" % self.probe_hash_name
+ return "%s.atomic_increment(__key);" % \
+ self.probe_hash_name
def _generate_pid_filter(self):
# Kernel probes need to explicitly filter pid, because the
disk_key_t key = {.slot = bpf_log2l(delta)};
void *__tmp = (void *)req->rq_disk->disk_name;
bpf_probe_read(&key.disk, sizeof(key.disk), __tmp);
- dist.increment(key);
+ dist.atomic_increment(key);
"""
elif args.flags:
storage_str += "BPF_HISTOGRAM(dist, flag_key_t);"
store_str += """
flag_key_t key = {.slot = bpf_log2l(delta)};
key.flags = req->cmd_flags;
- dist.increment(key);
+ dist.atomic_increment(key);
"""
else:
storage_str += "BPF_HISTOGRAM(dist);"
- store_str += "dist.increment(bpf_log2l(delta));"
+ store_str += "dist.atomic_increment(bpf_log2l(delta));"
if args.extension:
storage_str += "BPF_ARRAY(extension, ext_val_t, 1);"
else:
if args.timestamp:
print("%-8s\n" % strftime("%H:%M:%S"), end="")
-
+
if args.flags:
dist.print_log2_hist(label, "flags", flags_print)
else:
{
struct proc_key_t key = {.slot = bpf_log2l(args->bytes / 1024)};
bpf_probe_read_kernel(&key.name, sizeof(key.name), args->comm);
- dist.increment(key);
+ dist.atomic_increment(key);
return 0;
}
"""
// store as histogram
dist_key_t key = {.slot = bpf_log2l(delta)};
__builtin_memcpy(&key.op, op, sizeof(key.op));
- dist.increment(key);
+ dist.atomic_increment(key);
start.delete(&tid);
return 0;
u64 ip;
key.ip = PT_REGS_IP(ctx);
- counts.increment(key); // update counter
+ counts.atomic_increment(key); // update counter
return 0;
}
section = ""
bpf_text = bpf_text.replace('STORAGE', 'BPF_HISTOGRAM(dist);')
bpf_text = bpf_text.replace('STORE',
- 'dist.increment(bpf_log2l(delta));')
+ 'dist.atomic_increment(bpf_log2l(delta));')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
u64 delta = bpf_ktime_get_ns() - *timestampp;
FILTER
delta /= SCALE;
- latency.increment(bpf_log2l(delta));
+ latency.atomic_increment(bpf_log2l(delta));
temp.delete(&pid);
return 0;
}
*/
void count_fast(struct pt_regs *ctx) {
int key = S_REFS;
- u64 *leaf = stats.lookup(&key);
- if (leaf) (*leaf)++;
+ stats.atomic_increment(key);
}
void count_lookup(struct pt_regs *ctx) {
int key = S_SLOW;
- u64 *leaf = stats.lookup(&key);
- if (leaf) (*leaf)++;
+ stats.atomic_increment(key);
if (PT_REGS_RC(ctx) == 0) {
key = S_MISS;
- leaf = stats.lookup(&key);
- if (leaf) (*leaf)++;
+ stats.atomic_increment(key);
}
}
"""
try:
sleep(interval)
except KeyboardInterrupt:
- pass
exit()
print("%-8s: " % strftime("%H:%M:%S"), end="")
// store as histogram
dist_key_t key = {.slot = bpf_log2l(delta)};
__builtin_memcpy(&key.op, op, sizeof(key.op));
- dist.increment(key);
+ dist.atomic_increment(key);
return 0;
}
FILTERPID
FILTERCPU
int loc = LOCATION;
- u64 *val = counts.lookup(&loc);
- if (!val) {
- return 0; // Should never happen, # of locations is known
- }
- (*val)++;
+ counts.atomic_increment(loc);
return 0;
}
"""
FACTOR
// store as histogram
- dist.increment(bpf_log2l(delta));
+ dist.atomic_increment(bpf_log2l(delta));
out:
start.update(&index, &ts);
key.key.ip = ip;
key.key.pid = %s;
key.slot = bpf_log2l(delta);
- dist.increment(key);
+ dist.atomic_increment(key);
if (stack->head == 0) {
/* empty */
key.key.ip = ip;
key.key.pid = %s;
key.slot = bpf_log2l(delta);
- dist.increment(key);
+ dist.atomic_increment(key);
ipaddr.delete(&pid);
}
""" % pid)
'BPF_HASH(start, u32);')
bpf_text = bpf_text.replace('ENTRYSTORE', 'start.update(&pid, &ts);')
bpf_text = bpf_text.replace('STORE',
- 'dist.increment(bpf_log2l(delta));')
+ 'dist.atomic_increment(bpf_log2l(delta));')
bpf_text = bpf_text.replace('TYPEDEF', '')
bpf_text = bpf_text.replace('FUNCTION', '')
{
irq_key_t key = {.slot = 0 /* ignore */};
TP_DATA_LOC_READ_CONST(&key.name, name, sizeof(key.name));
- dist.increment(key);
+ dist.atomic_increment(key);
return 0;
}
"""
bpf_text = bpf_text.replace('STORE',
'irq_key_t key = {.slot = bpf_log2l(delta / %d)};' % factor +
'bpf_probe_read_kernel(&key.name, sizeof(key.name), name);' +
- 'dist.increment(key);')
+ 'dist.atomic_increment(key);')
else:
bpf_text = bpf_text.replace('STORE',
'irq_key_t key = {.slot = 0 /* ignore */};' +
'bpf_probe_read_kernel(&key.name, sizeof(key.name), name);' +
- 'dist.increment(key, delta);')
+ 'dist.atomic_increment(key, delta);')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
* If this is the only call in the chain and predicate passes
*/
if (%s == 1 && %s && overridden < %s) {
- count.increment(zero);
+ count.atomic_increment(zero);
bpf_override_return(ctx, %s);
return 0;
}
* If all conds have been met and predicate passes
*/
if (p->conds_met == %s && %s && overridden < %s) {
- count.increment(zero);
+ count.atomic_increment(zero);
bpf_override_return(ctx, %s);
}
return 0;
// store as histogram
dist_key_t key = {.slot = bpf_log2l(delta)};
__builtin_memcpy(&key.op, op, sizeof(key.op));
- dist.increment(key);
+ dist.atomic_increment(key);
start.delete(&tid);
return 0;
BPF_ARRAY(stats, u64, S_MAXSTAT);
static void stats_increment(int key) {
- u64 *leaf = stats.lookup(&key);
- if (leaf) (*leaf)++;
+ stats.atomic_increment(key);
}
void do_count(struct pt_regs *ctx) { stats_increment(S_COUNT); }
if (f != NULL && *f == 1) {
ts = bpf_ktime_get_ns();
birth.update(&retval, &ts);
-
- u64 *count = pages.lookup(&zero);
- if (count) (*count)++; // increment read ahead pages count
+ pages.atomic_increment(zero);
}
return 0;
}
u64 *bts = birth.lookup(&arg0);
if (bts != NULL) {
delta = bpf_ktime_get_ns() - *bts;
- dist.increment(bpf_log2l(delta/1000000));
-
- u64 *count = pages.lookup(&zero);
- if (count) (*count)--; // decrement read ahead pages count
-
+ dist.atomic_increment(bpf_log2l(delta/1000000));
+ pages.atomic_increment(zero, -1);
birth.delete(&arg0); // remove the entry from hashmap
}
return 0;
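atomic_increment() also accepts an amount, which several hunks rely on: the hardirqs, softirqs, tcpsubnet, and waker/target counts add a delta per event, and readahead passes -1 so the in-flight page counter is decremented atomically instead of via (*count)--. A hedged sketch of that gauge-style use follows; the probe and names are illustrative, not from the patch, and it assumes bcc and root privileges.

# Sketch only: a gauge kept with the two-argument form of
# atomic_increment(); a negative amount decrements, as in the readahead
# change above. The count can drift for calls already in flight when
# the probes attach.
from bcc import BPF
from time import sleep

bpf_text = """
BPF_ARRAY(inflight, u64, 1);

int on_entry(struct pt_regs *ctx) {
    int zero = 0;
    inflight.atomic_increment(zero);       // +1 on entry
    return 0;
}

int on_return(struct pt_regs *ctx) {
    int zero = 0;
    inflight.atomic_increment(zero, -1);   // -1 on return
    return 0;
}
"""

b = BPF(text=bpf_text)
b.attach_kprobe(event="vfs_read", fn_name="on_entry")
b.attach_kretprobe(event="vfs_read", fn_name="on_return")
sleep(1)
for k, v in b["inflight"].items():
    print("vfs_read calls in flight: %d" % v.value)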
'BPF_HISTOGRAM(dist, pidns_key_t);')
bpf_text = bpf_text.replace('STORE', 'pidns_key_t key = ' +
'{.id = prev->nsproxy->pid_ns_for_children->ns.inum, ' +
- '.slot = bpf_log2l(delta)}; dist.increment(key);')
+ '.slot = bpf_log2l(delta)}; dist.atomic_increment(key);')
else:
section = ""
bpf_text = bpf_text.replace('STORAGE', 'BPF_HISTOGRAM(dist);')
bpf_text = bpf_text.replace('STORE',
- 'dist.increment(bpf_log2l(delta));')
+ 'dist.atomic_increment(bpf_log2l(delta));')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
else:
bpf_text = bpf_text.replace('STORAGE',
'BPF_HISTOGRAM(dist, unsigned int);')
- bpf_text = bpf_text.replace('STORE', 'dist.increment(len);')
+ bpf_text = bpf_text.replace('STORE', 'dist.atomic_increment(len);')
if check_runnable_weight_field():
bpf_text = bpf_text.replace('RUNNABLE_WEIGHT_FIELD', 'unsigned long runnable_weight;')
if args.dist:
bpf_text = bpf_text.replace('STORE',
'key.vec = vec; key.slot = bpf_log2l(delta / %d); ' % factor +
- 'dist.increment(key);')
+ 'dist.atomic_increment(key);')
else:
bpf_text = bpf_text.replace('STORE',
'key.vec = valp->vec; ' +
- 'dist.increment(key, delta);')
+ 'dist.atomic_increment(key, delta);')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
key.tgid = GET_TGID;
STORE_COMM
%s
- counts.increment(key);
+ counts.atomic_increment(key);
return 0;
}
"""
STORE_HIST
key.slot = bpf_log2l(srtt);
- hist_srtt.increment(key);
+ hist_srtt.atomic_increment(key);
STORE_LATENCY
if (!categorized && (__NET_ADDR__ & __NET_MASK__) ==
(dst & __NET_MASK__)) {
struct index_key_t key = {.index = __POS__};
- ipv4_send_bytes.increment(key, size);
+ ipv4_send_bytes.atomic_increment(key, size);
categorized = 1;
}
"""
backlog_key_t key = {};
key.backlog = sk->sk_max_ack_backlog;
key.slot = bpf_log2l(sk->sk_ack_backlog);
- dist.increment(key);
+ dist.atomic_increment(key);
return 0;
};
int do_count(struct pt_regs *ctx) {
struct key_t key = {};
key.ip = PT_REGS_IP(ctx);
- counts.increment(key);
+ counts.atomic_increment(key);
return 0;
}
""")
BPF_ARRAY(stats, u64, S_MAXSTAT);
static void stats_increment(int key) {
- u64 *leaf = stats.lookup(&key);
- if (leaf) (*leaf)++;
+ stats.atomic_increment(key);
}
"""
bpf_probe_read_kernel(&key.target, sizeof(key.target), p->comm);
bpf_get_current_comm(&key.waker, sizeof(key.waker));
- counts.increment(key, delta);
+ counts.atomic_increment(key, delta);
return 0;
}
"""
// store as histogram
dist_key_t key = {.slot = bpf_log2l(delta)};
__builtin_memcpy(&key.op, op, sizeof(key.op));
- dist.increment(key);
+ dist.atomic_increment(key);
start.delete(&tid);
return 0;
// store as histogram
dist_key_t key = {.slot = bpf_log2l(delta)};
__builtin_memcpy(&key.op, op, sizeof(key.op));
- dist.increment(key);
+ dist.atomic_increment(key);
start.delete(&tid);
return 0;