bpf, lru: avoid messing with eviction heuristics upon syscall lookup

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index cebadd6..6fe7279 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -518,18 +518,30 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
        return insn - insn_buf;
 }
 
-static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
+                                                       void *key, const bool mark)
 {
        struct htab_elem *l = __htab_map_lookup_elem(map, key);
 
        if (l) {
-               bpf_lru_node_set_ref(&l->lru_node);
+               if (mark)
+                       bpf_lru_node_set_ref(&l->lru_node);
                return l->key + round_up(map->key_size, 8);
        }
 
        return NULL;
 }
 
+static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+{
+       return __htab_lru_map_lookup_elem(map, key, true);
+}
+
+static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
+{
+       return __htab_lru_map_lookup_elem(map, key, false);
+}
+
 static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
                                   struct bpf_insn *insn_buf)
 {
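
The split above only pays off if the two callers actually diverge: lookups from BPF programs should keep bumping the LRU reference bit, while lookups arriving over the bpf(2) syscall should not. A minimal sketch of how the syscall side can prefer the non-marking variant when a map provides one (illustrative only; the actual dispatch lives in the companion change to the generic syscall lookup path, and lookup_for_syscall is a made-up helper name):

	/* Prefer the syscall-only lookup so that BPF_MAP_LOOKUP_ELEM from
	 * user space skips bpf_lru_node_set_ref(); program-side lookups
	 * still go through ->map_lookup_elem() and mark the element.
	 */
	static void *lookup_for_syscall(struct bpf_map *map, void *key)
	{
		if (map->ops->map_lookup_elem_sys_only)
			return map->ops->map_lookup_elem_sys_only(map, key);

		return map->ops->map_lookup_elem(map, key);
	}
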
@@ -1206,6 +1218,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
        .map_lookup_elem = htab_lru_map_lookup_elem,
+       .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
        .map_update_elem = htab_lru_map_update_elem,
        .map_delete_elem = htab_lru_map_delete_elem,
        .map_gen_lookup = htab_lru_map_gen_lookup,
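
What this guards against is a plain map walk from user space, where every element gets looked up once just to dump or filter it; both the syscall-only lookup wired up above and the bpf_percpu_hash_copy() change below are reached on that path. A hedged user-space illustration using libbpf's bpf_map_get_next_key()/bpf_map_lookup_elem() wrappers (map_fd and the __u32/__u64 key/value types are placeholders for some LRU hash map):

	#include <linux/types.h>
	#include <bpf/bpf.h>

	static void dump_lru_map(int map_fd)
	{
		__u32 cur, next;
		__u64 value;
		void *prev = NULL;	/* NULL fetches the first key */

		while (!bpf_map_get_next_key(map_fd, prev, &next)) {
			if (!bpf_map_lookup_elem(map_fd, &next, &value)) {
				/* consume key/value, e.g. print or filter */
			}
			cur = next;
			prev = &cur;
		}
	}

Before this patch, every bpf_map_lookup_elem() call in such a walk marked the element as recently used, so a periodic dump could end up protecting exactly the entries the LRU should have evicted next.
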
@@ -1237,7 +1250,6 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
 
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 {
-       struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l;
        void __percpu *pptr;
        int ret = -ENOENT;
@@ -1253,8 +1265,9 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
        l = __htab_map_lookup_elem(map, key);
        if (!l)
                goto out;
-       if (htab_is_lru(htab))
-               bpf_lru_node_set_ref(&l->lru_node);
+       /* We do not mark LRU map element here in order to not mess up
+        * eviction heuristics when user space does a map walk.
+        */
        pptr = htab_elem_get_ptr(l, map->key_size);
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off,