[platform/kernel/linux-rpi.git] / kernel / kprobes.c
index ab257be..aed9078 100644
@@ -483,6 +483,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  */
 static void do_optimize_kprobes(void)
 {
+       lockdep_assert_held(&text_mutex);
        /*
         * The optimization/unoptimization refers online_cpus via
         * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -500,9 +501,7 @@ static void do_optimize_kprobes(void)
            list_empty(&optimizing_list))
                return;
 
-       mutex_lock(&text_mutex);
        arch_optimize_kprobes(&optimizing_list);
-       mutex_unlock(&text_mutex);
 }
 
 /*
@@ -513,6 +512,7 @@ static void do_unoptimize_kprobes(void)
 {
        struct optimized_kprobe *op, *tmp;
 
+       lockdep_assert_held(&text_mutex);
        /* See comment in do_optimize_kprobes() */
        lockdep_assert_cpus_held();
 
@@ -520,7 +520,6 @@ static void do_unoptimize_kprobes(void)
        if (list_empty(&unoptimizing_list))
                return;
 
-       mutex_lock(&text_mutex);
        arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
        /* Loop free_list for disarming */
        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -537,7 +536,6 @@ static void do_unoptimize_kprobes(void)
                } else
                        list_del_init(&op->list);
        }
-       mutex_unlock(&text_mutex);
 }
 
 /* Reclaim all kprobes on the free_list */
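
The two hunks above drop the helpers' own text_mutex lock/unlock pairs and replace them with lockdep_assert_held(), which only checks (when lockdep is enabled) that the caller already holds the mutex and costs nothing otherwise. A minimal sketch of that transformation, with a hypothetical helper name rather than the tree's code:

	/* Before: the helper serialized text patching itself. */
	static void patch_helper(void)
	{
		mutex_lock(&text_mutex);
		/* ... modify kernel text ... */
		mutex_unlock(&text_mutex);
	}

	/* After: the caller owns text_mutex; the helper only asserts it. */
	static void patch_helper(void)
	{
		lockdep_assert_held(&text_mutex);
		/* ... modify kernel text ... */
	}
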
@@ -546,8 +544,14 @@ static void do_free_cleaned_kprobes(void)
        struct optimized_kprobe *op, *tmp;
 
        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
-               BUG_ON(!kprobe_unused(&op->kp));
                list_del_init(&op->list);
+               if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
+                       /*
+                        * This must not happen, but if there is a kprobe
+                        * still in use, keep it on kprobes hash list.
+                        */
+                       continue;
+               }
                free_aggr_kprobe(&op->kp);
        }
 }
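
The hunk above leans on WARN_ON_ONCE() evaluating to its condition, so one test both reports the unexpected state (once) and guards the skip, while list_del_init() now runs unconditionally so the entry leaves freeing_list either way. A stripped-down sketch of the idiom with hypothetical names:

	list_for_each_entry_safe(item, tmp, &pending, list) {
		list_del_init(&item->list);		/* always unlink from the work list */
		if (WARN_ON_ONCE(still_in_use(item)))
			continue;			/* warn once and keep the object alive */
		release(item);
	}
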
@@ -563,6 +567,7 @@ static void kprobe_optimizer(struct work_struct *work)
 {
        mutex_lock(&kprobe_mutex);
        cpus_read_lock();
+       mutex_lock(&text_mutex);
        /* Lock modules while optimizing kprobes */
        mutex_lock(&module_mutex);
 
@@ -590,6 +595,7 @@ static void kprobe_optimizer(struct work_struct *work)
        do_free_cleaned_kprobes();
 
        mutex_unlock(&module_mutex);
+       mutex_unlock(&text_mutex);
        cpus_read_unlock();
        mutex_unlock(&kprobe_mutex);
 
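
Taken together with the earlier hunks, kprobe_optimizer() now establishes one fixed lock order and the helpers merely assert it. A condensed view of the function after the patch, with the waiting and re-queueing logic between the steps elided:

	static void kprobe_optimizer(struct work_struct *work)
	{
		mutex_lock(&kprobe_mutex);	/* serialize kprobe bookkeeping */
		cpus_read_lock();		/* keep online_cpus stable for stop_machine() */
		mutex_lock(&text_mutex);	/* taken once here, asserted in the helpers */
		mutex_lock(&module_mutex);	/* lock modules while optimizing kprobes */

		do_unoptimize_kprobes();
		/* ... wait for kprobe users to settle ... */
		do_optimize_kprobes();
		do_free_cleaned_kprobes();

		mutex_unlock(&module_mutex);
		mutex_unlock(&text_mutex);
		cpus_read_unlock();
		mutex_unlock(&kprobe_mutex);
		/* ... re-queue the work if more requests arrived meanwhile ... */
	}
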
@@ -700,7 +706,7 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 }
 
 /* Cancel unoptimizing for reusing */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
        struct optimized_kprobe *op;
 
@@ -714,8 +720,11 @@ static void reuse_unused_kprobe(struct kprobe *ap)
        /* Enable the probe again */
        ap->flags &= ~KPROBE_FLAG_DISABLED;
        /* Optimize it again (remove from op->list) */
-       BUG_ON(!kprobe_optready(ap));
+       if (!kprobe_optready(ap))
+               return -EINVAL;
+
        optimize_kprobe(ap);
+       return 0;
 }
 
 /* Remove optimized instructions */
@@ -940,11 +949,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 #define kprobe_disarmed(p)                     kprobe_disabled(p)
 #define wait_for_kprobe_optimizer()            do {} while (0)
 
-/* There should be no unused kprobes can be reused without optimization */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
+       /*
+        * If the optimized kprobe is NOT supported, the aggr kprobe is
+        * released at the same time that the last aggregated kprobe is
+        * unregistered.
+        * Thus there should be no chance to reuse unused kprobe.
+        */
        printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
-       BUG_ON(kprobe_unused(ap));
+       return -EINVAL;
 }
 
 static void free_aggr_kprobe(struct kprobe *p)
@@ -1318,9 +1332,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
                        goto out;
                }
                init_aggr_kprobe(ap, orig_p);
-       } else if (kprobe_unused(ap))
+       } else if (kprobe_unused(ap)) {
                /* This probe is going to die. Rescue it */
-               reuse_unused_kprobe(ap);
+               ret = reuse_unused_kprobe(ap);
+               if (ret)
+                       goto out;
+       }
 
        if (kprobe_gone(ap)) {
                /*
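
With both flavours of reuse_unused_kprobe() returning an error rather than hitting BUG_ON(), register_aggr_kprobe() backs out via its out label and the failure propagates to whoever registers the probe. A hypothetical caller-side view (my_probe is not from the patch):

	ret = register_kprobe(&my_probe);
	if (ret)	/* e.g. -EINVAL handed up from reuse_unused_kprobe() */
		pr_warn("kprobe registration failed: %d\n", ret);
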
@@ -1494,7 +1511,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
        /* Ensure it is not in reserved area nor out of text */
        if (!kernel_text_address((unsigned long) p->addr) ||
            within_kprobe_blacklist((unsigned long) p->addr) ||
-           jump_label_text_reserved(p->addr, p->addr)) {
+           jump_label_text_reserved(p->addr, p->addr) ||
+           find_bug((unsigned long)p->addr)) {
                ret = -EINVAL;
                goto out;
        }
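
The added find_bug() test rejects a probe whose address lands on a BUG()/WARN() trap entry, the same way blacklisted or jump-label-reserved addresses are refused. A hypothetical illustration (the address is made up; only register_kprobe() and the -EINVAL result reflect the patch's behaviour):

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static struct kprobe kp = {
		/* hypothetical address that resolves to a BUG()/WARN() entry */
		.addr = (kprobe_opcode_t *)0xffffffffa0001234UL,
	};

	static int __init demo_init(void)
	{
		int ret = register_kprobe(&kp);	/* now fails with -EINVAL */

		pr_info("register_kprobe() returned %d\n", ret);
		return ret;
	}
	module_init(demo_init);
	MODULE_LICENSE("GPL");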