Increments the key's value by `increment_amount`, which defaults to 1. Used for histograms.
+```map.increment()``` is not atomic under concurrency. If you want more accurate results, use ```map.atomic_increment()``` instead of ```map.increment()```. The overhead of ```map.increment()``` and ```map.atomic_increment()``` is similar.
+
+Note: when using ```map.atomic_increment()``` on a BPF map of type ```BPF_MAP_TYPE_HASH```, ```map.atomic_increment()``` does not guarantee atomicity when the specified key does not exist.
+
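For example, a minimal sketch of counting events per process (the map and function names are illustrative):

```C
BPF_HASH(counts, u32, u64);

int do_count(struct pt_regs *ctx) {
    u32 pid = bpf_get_current_pid_tgid() >> 32;
    counts.increment(pid);           // add 1 (the default amount); may lose updates under concurrency
    counts.atomic_increment(pid, 2); // add 2 atomically, once the key exists
    return 0;
}
```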
Examples in situ:
[search /examples](https://github.com/iovisor/bcc/search?q=increment+path%3Aexamples&type=Code),
[search /tools](https://github.com/iovisor/bcc/search?q=increment+path%3Atools&type=Code)
int (*delete) (_key_type *); \
void (*call) (void *, int index); \
void (*increment) (_key_type, ...); \
+ void (*atomic_increment) (_key_type, ...); \
int (*get_stackid) (void *, u64); \
u32 max_entries; \
int flags; \
}
txt += "}";
txt += "leaf;})";
- } else if (memb_name == "increment") {
+ } else if (memb_name == "increment" || memb_name == "atomic_increment") {
string name = string(Ref->getDecl()->getName());
string arg0 = rewriter_.getRewrittenText(expansionRange(Call->getArg(0)->getSourceRange()));
string update = "bpf_map_update_elem_(bpf_pseudo_fd(1, " + fd + ")";
txt = "({ typeof(" + name + ".key) _key = " + arg0 + "; ";
txt += "typeof(" + name + ".leaf) *_leaf = " + lookup + ", &_key); ";
+ txt += "if (_leaf) ";
- txt += "if (_leaf) (*_leaf) += " + increment_value + ";";
+ if (memb_name == "atomic_increment") {
+ txt += "lock_xadd(_leaf, " + increment_value + ");";
+ } else {
+ txt += "(*_leaf) += " + increment_value + ";";
+ }
if (desc->second.type == BPF_MAP_TYPE_HASH) {
txt += "else { typeof(" + name + ".leaf) _zleaf; __builtin_memset(&_zleaf, 0, sizeof(_zleaf)); ";
txt += "_zleaf += " + increment_value + ";";
struct bpf_map;
BPF_HASH(map);
int map_delete(struct pt_regs *ctx, struct bpf_map *bpfmap, u64 *k) {
- map.increment(42, 10);
+ map.increment(42, 5);
+ map.atomic_increment(42, 5);
return 0;
}
""")
BPF_HASH(stub);
int kprobe__htab_map_delete_elem(struct pt_regs *ctx, struct bpf_map *map, u64 *k) {
hist1.increment(bpf_log2l(*k));
+ hist1.atomic_increment(bpf_log2l(*k));
return 0;
}
""")
BPF_HASH(stub2);
int kprobe__htab_map_delete_elem(struct pt_regs *ctx, struct bpf_map *map, u64 *k) {
hist1.increment((Key){map, bpf_log2l(*k)});
+ hist1.atomic_increment((Key){map, bpf_log2l(*k)});
return 0;
}
""")
#else
Key k = {.slot = bpf_log2l(prev->start_boottime)};
#endif
- if (!bpf_get_current_comm(&k.name, sizeof(k.name)))
+ if (!bpf_get_current_comm(&k.name, sizeof(k.name))) {
hist1.increment(k);
+ hist1.atomic_increment(k);
+ }
return 0;
}
""")
return 0;
u64 *prevp = prev.lookup(&cpu);
- if (prevp)
+ if (prevp) {
dist.increment(bpf_log2l(val - *prevp));
+ dist.atomic_increment(bpf_log2l(val - *prevp));
+ }
return 0;
}
"""