From fa5e83df173beb6c1334737a463fc2b4ef4efa8b Mon Sep 17 00:00:00 2001
From: Yafang Shao <laoar.shao@gmail.com>
Date: Sun, 5 Mar 2023 12:46:07 +0000
Subject: [PATCH] bpf: devmap memory usage

A new helper is introduced to calculate the memory usage of devmap and
devmap_hash. The number of dynamically allocated elements is already
recorded for devmap_hash, but not for devmap. To track the memory size
of dynamically allocated elements, this patch also counts them for
devmap.

The result is as follows,

- before
40: devmap  name count_map  flags 0x80
        key 4B  value 4B  max_entries 65536  memlock 524288B
41: devmap_hash  name count_map  flags 0x80
        key 4B  value 4B  max_entries 65536  memlock 524288B

- after
40: devmap  name count_map  flags 0x80  <<<< no elements
        key 4B  value 4B  max_entries 65536  memlock 524608B
41: devmap_hash  name count_map  flags 0x80  <<<< no elements
        key 4B  value 4B  max_entries 65536  memlock 524608B

Note that the number of buckets is the same as max_entries for
devmap_hash in this case.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Link: https://lore.kernel.org/r/20230305124615.12358-11-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/devmap.c | 26 ++++++++++++++++++++++++--
 1 file changed, 24 insertions(+), 2 deletions(-)

diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 2675fef..19b036a 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -819,8 +819,10 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key)
 		return -EINVAL;
 
 	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
-	if (old_dev)
+	if (old_dev) {
 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
+		atomic_dec((atomic_t *)&dtab->items);
+	}
 	return 0;
 }
 
@@ -931,6 +933,8 @@ static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
 	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
 	if (old_dev)
 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
+	else
+		atomic_inc((atomic_t *)&dtab->items);
 
 	return 0;
 }
@@ -1016,6 +1020,20 @@ static int dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
 				      __dev_map_hash_lookup_elem);
 }
 
+static u64 dev_map_mem_usage(const struct bpf_map *map)
+{
+	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+	u64 usage = sizeof(struct bpf_dtab);
+
+	if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
+		usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);
+	else
+		usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
+	usage += atomic_read((atomic_t *)&dtab->items) *
+			 (u64)sizeof(struct bpf_dtab_netdev);
+	return usage;
+}
+
 BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
 const struct bpf_map_ops dev_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
@@ -1026,6 +1044,7 @@ const struct bpf_map_ops dev_map_ops = {
 	.map_update_elem = dev_map_update_elem,
 	.map_delete_elem = dev_map_delete_elem,
 	.map_check_btf = map_check_no_btf,
+	.map_mem_usage = dev_map_mem_usage,
 	.map_btf_id = &dev_map_btf_ids[0],
 	.map_redirect = dev_map_redirect,
 };
@@ -1039,6 +1058,7 @@ const struct bpf_map_ops dev_map_hash_ops = {
 	.map_update_elem = dev_map_hash_update_elem,
 	.map_delete_elem = dev_map_hash_delete_elem,
 	.map_check_btf = map_check_no_btf,
+	.map_mem_usage = dev_map_mem_usage,
 	.map_btf_id = &dev_map_btf_ids[0],
 	.map_redirect = dev_hash_map_redirect,
 };
@@ -1109,9 +1129,11 @@ static int dev_map_notification(struct notifier_block *notifier,
 			if (!dev || netdev != dev->dev)
 				continue;
 			odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
-			if (dev == odev)
+			if (dev == odev) {
 				call_rcu(&dev->rcu,
 					 __dev_map_entry_free);
+				atomic_dec((atomic_t *)&dtab->items);
+			}
 		}
 	}
 	rcu_read_unlock();
-- 
2.7.4
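
As a sanity check on the new accounting, the standalone userspace sketch
below reproduces the memlock value quoted in the commit message for an
empty devmap with 65536 entries. It is not kernel code: DTAB_SIZE,
PTR_SIZE and ELEM_SIZE are assumed/hypothetical sizes (320 bytes for
struct bpf_dtab matches the 524608B - 524288B delta on the reporting
kernel, but the real sizes differ between builds and configs).

/*
 * Minimal sketch of the dev_map_mem_usage() arithmetic for a plain
 * devmap (array of pointers). All struct sizes are assumptions made
 * for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

#define DTAB_SIZE 320ULL  /* assumed sizeof(struct bpf_dtab) */
#define PTR_SIZE    8ULL  /* sizeof(struct bpf_dtab_netdev *) on 64-bit */
#define ELEM_SIZE 200ULL  /* hypothetical sizeof(struct bpf_dtab_netdev) */

static uint64_t devmap_mem_usage(uint32_t max_entries, uint32_t items)
{
        uint64_t usage = DTAB_SIZE;

        usage += (uint64_t)max_entries * PTR_SIZE; /* netdev_map array */
        usage += (uint64_t)items * ELEM_SIZE;      /* live elements */
        return usage;
}

int main(void)
{
        /* 65536 max_entries, no elements inserted -> 524608B as reported */
        printf("memlock %lluB\n",
               (unsigned long long)devmap_mem_usage(65536, 0));
        return 0;
}

With zero live elements the reported value differs from the pre-patch
524288B (65536 * 8) by exactly the struct bpf_dtab overhead that the new
helper now includes; each inserted element then adds one per-element
allocation on top of that.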