kprobes: Replace rp->free_instance with freelist
author		Peter Zijlstra <peterz@infradead.org>
		Sat, 29 Aug 2020 13:03:56 +0000 (22:03 +0900)
committer	Ingo Molnar <mingo@kernel.org>
		Mon, 12 Oct 2020 16:27:28 +0000 (18:27 +0200)
Gets rid of rp->lock, and as a result kretprobes are now fully
lockless.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/159870623583.1229682.17472357584134058687.stgit@devnote2
include/linux/kprobes.h
kernel/kprobes.c
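
The changes below build on the lock-less freelist primitive from <linux/freelist.h>, added earlier in this series. As orientation, here is a simplified sketch of the interface exactly as this patch uses it (freelist_head.head, freelist_node.next, freelist_add(), freelist_try_get()). It is a naive compare-and-swap stack for illustration only; the in-tree implementation has to do more, since freelist_try_get() can run from kprobe/NMI context and must not fall victim to ABA or node-reuse races.

/*
 * Illustration only: a minimal Treiber-stack rendition of the freelist
 * interface used by the hunks below.  NOT the in-tree <linux/freelist.h>
 * implementation -- the real one carries extra per-node state so that
 * concurrent freelist_try_get()/freelist_add() are safe against ABA and
 * premature node reuse; this naive version is not.
 */
#include <linux/atomic.h>
#include <linux/compiler.h>

struct freelist_node {
	struct freelist_node	*next;
};

struct freelist_head {
	struct freelist_node	*head;
};

/* Push @node; may race with other adders and getters. */
static inline void freelist_add(struct freelist_node *node,
				struct freelist_head *list)
{
	struct freelist_node *head;

	do {
		head = READ_ONCE(list->head);
		node->next = head;
	} while (cmpxchg(&list->head, head, node) != head);
}

/* Pop one node, or return NULL if the list is empty. */
static inline struct freelist_node *freelist_try_get(struct freelist_head *list)
{
	struct freelist_node *head, *next;

	do {
		head = READ_ONCE(list->head);
		if (!head)
			return NULL;
		next = READ_ONCE(head->next);
	} while (cmpxchg(&list->head, head, next) != head);

	return head;
}

Consumers embed a freelist_node in their own object and use container_of() to get back to it, which is exactly what kretprobe_instance does in the diff below.
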

diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 00cf4421efd593f4dcd033b1844678fd6590ab99..b7824e3f1ef5a2909241fd497c581073366f35f4 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -28,6 +28,7 @@
 #include <linux/mutex.h>
 #include <linux/ftrace.h>
 #include <linux/refcount.h>
+#include <linux/freelist.h>
 #include <asm/kprobes.h>
 
 #ifdef CONFIG_KPROBES
@@ -157,17 +158,16 @@ struct kretprobe {
        int maxactive;
        int nmissed;
        size_t data_size;
-       struct hlist_head free_instances;
+       struct freelist_head freelist;
        struct kretprobe_holder *rph;
-       raw_spinlock_t lock;
 };
 
 struct kretprobe_instance {
        union {
-               struct llist_node llist;
-               struct hlist_node hlist;
+               struct freelist_node freelist;
                struct rcu_head rcu;
        };
+       struct llist_node llist;
        struct kretprobe_holder *rph;
        kprobe_opcode_t *ret_addr;
        void *fp;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 850ee36a4051f23f8bf27e01080dd89ed893381b..30b8fe7d571ddceb958e5accef3a41039c46f2ab 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1228,11 +1228,8 @@ static void recycle_rp_inst(struct kretprobe_instance *ri)
 {
        struct kretprobe *rp = get_kretprobe(ri);
 
-       INIT_HLIST_NODE(&ri->hlist);
        if (likely(rp)) {
-               raw_spin_lock(&rp->lock);
-               hlist_add_head(&ri->hlist, &rp->free_instances);
-               raw_spin_unlock(&rp->lock);
+               freelist_add(&ri->freelist, &rp->freelist);
        } else
                call_rcu(&ri->rcu, free_rp_inst_rcu);
 }
@@ -1290,11 +1287,14 @@ NOKPROBE_SYMBOL(kprobe_flush_task);
 static inline void free_rp_inst(struct kretprobe *rp)
 {
        struct kretprobe_instance *ri;
-       struct hlist_node *next;
+       struct freelist_node *node;
        int count = 0;
 
-       hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
-               hlist_del(&ri->hlist);
+       node = rp->freelist.head;
+       while (node) {
+               ri = container_of(node, struct kretprobe_instance, freelist);
+               node = node->next;
+
                kfree(ri);
                count++;
        }
@@ -1925,32 +1925,26 @@ NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
 {
        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
-       unsigned long flags = 0;
        struct kretprobe_instance *ri;
+       struct freelist_node *fn;
 
-       /* TODO: consider to only swap the RA after the last pre_handler fired */
-       raw_spin_lock_irqsave(&rp->lock, flags);
-       if (!hlist_empty(&rp->free_instances)) {
-               ri = hlist_entry(rp->free_instances.first,
-                               struct kretprobe_instance, hlist);
-               hlist_del(&ri->hlist);
-               raw_spin_unlock_irqrestore(&rp->lock, flags);
-
-               if (rp->entry_handler && rp->entry_handler(ri, regs)) {
-                       raw_spin_lock_irqsave(&rp->lock, flags);
-                       hlist_add_head(&ri->hlist, &rp->free_instances);
-                       raw_spin_unlock_irqrestore(&rp->lock, flags);
-                       return 0;
-               }
-
-               arch_prepare_kretprobe(ri, regs);
+       fn = freelist_try_get(&rp->freelist);
+       if (!fn) {
+               rp->nmissed++;
+               return 0;
+       }
 
-               __llist_add(&ri->llist, &current->kretprobe_instances);
+       ri = container_of(fn, struct kretprobe_instance, freelist);
 
-       } else {
-               rp->nmissed++;
-               raw_spin_unlock_irqrestore(&rp->lock, flags);
+       if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+               freelist_add(&ri->freelist, &rp->freelist);
+               return 0;
        }
+
+       arch_prepare_kretprobe(ri, regs);
+
+       __llist_add(&ri->llist, &current->kretprobe_instances);
+
        return 0;
 }
 NOKPROBE_SYMBOL(pre_handler_kretprobe);
@@ -2007,8 +2001,7 @@ int register_kretprobe(struct kretprobe *rp)
                rp->maxactive = num_possible_cpus();
 #endif
        }
-       raw_spin_lock_init(&rp->lock);
-       INIT_HLIST_HEAD(&rp->free_instances);
+       rp->freelist.head = NULL;
        rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
        if (!rp->rph)
                return -ENOMEM;
@@ -2023,8 +2016,7 @@ int register_kretprobe(struct kretprobe *rp)
                        return -ENOMEM;
                }
                inst->rph = rp->rph;
-               INIT_HLIST_NODE(&inst->hlist);
-               hlist_add_head(&inst->hlist, &rp->free_instances);
+               freelist_add(&inst->freelist, &rp->freelist);
        }
        refcount_set(&rp->rph->ref, i);
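
Registration-time behaviour is otherwise unchanged: register_kretprobe() pre-allocates maxactive instances and pushes each one onto rp->freelist, pre_handler_kretprobe() pops one per hit, and hits that find the list empty are still counted in nmissed. A minimal consumer in the style of samples/kprobes/kretprobe_example.c exercises exactly this path; in the sketch below the probed symbol ("kernel_clone") and the handler bodies are illustrative, not part of this patch.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative kretprobe module, modelled on samples/kprobes/kretprobe_example.c. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* Returning non-zero here hands the instance straight back to rp->freelist. */
	return 0;
}

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("probed function returned %lu\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.kp.symbol_name	= "kernel_clone",	/* illustrative target */
	.entry_handler	= entry_handler,
	.handler	= ret_handler,
	.maxactive	= 20,	/* instances pre-allocated onto rp->freelist */
};

static int __init my_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit my_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
	/* Hits that found the freelist empty show up here. */
	pr_info("missed %d probes\n", my_kretprobe.nmissed);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
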