stop_machine: Use raw spinlocks
author Thomas Gleixner <tglx@linutronix.de>
Mon, 23 Apr 2018 19:16:35 +0000 (21:16 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 3 Aug 2018 05:50:38 +0000 (07:50 +0200)
[ Upstream commit de5b55c1d4e30740009864eb35ce4ed856aac01d ]

Use raw locks in stop_machine() to allow locking in irq-off and
preempt-disabled regions on -RT. This also documents the possible locking
context in general.

[bigeasy: update patch description.]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lkml.kernel.org/r/20180423191635.6014-1-bigeasy@linutronix.de
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
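
Context for the change (illustrative sketch, not part of the patch): on
PREEMPT_RT, spinlock_t is substituted by a sleeping rtmutex-based lock,
which must not be acquired while interrupts are disabled or preemption
is off; raw_spinlock_t always remains a true spinning lock. A minimal
sketch of the pattern the patch converts to, using a hypothetical
example_lock in place of the per-CPU stopper lock:

#include <linux/spinlock.h>

/* Hypothetical lock and function, for illustration only */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_irqsoff_path(void)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() never sleeps, even on -RT, so it is
	 * safe from irq-off and preempt-disabled regions alike.
	 */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... short, bounded critical section ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}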
kernel/stop_machine.c

diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2f6fa95..1ff523d 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -37,7 +37,7 @@ struct cpu_stop_done {
 struct cpu_stopper {
        struct task_struct      *thread;
 
-       spinlock_t              lock;
+       raw_spinlock_t          lock;
        bool                    enabled;        /* is this stopper enabled? */
        struct list_head        works;          /* list of pending works */
 
@@ -81,13 +81,13 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
        unsigned long flags;
        bool enabled;
 
-       spin_lock_irqsave(&stopper->lock, flags);
+       raw_spin_lock_irqsave(&stopper->lock, flags);
        enabled = stopper->enabled;
        if (enabled)
                __cpu_stop_queue_work(stopper, work, &wakeq);
        else if (work->done)
                cpu_stop_signal_done(work->done);
-       spin_unlock_irqrestore(&stopper->lock, flags);
+       raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
        wake_up_q(&wakeq);
 
@@ -237,8 +237,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
        DEFINE_WAKE_Q(wakeq);
        int err;
 retry:
-       spin_lock_irq(&stopper1->lock);
-       spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+       raw_spin_lock_irq(&stopper1->lock);
+       raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
 
        err = -ENOENT;
        if (!stopper1->enabled || !stopper2->enabled)
@@ -261,8 +261,8 @@ retry:
        __cpu_stop_queue_work(stopper1, work1, &wakeq);
        __cpu_stop_queue_work(stopper2, work2, &wakeq);
 unlock:
-       spin_unlock(&stopper2->lock);
-       spin_unlock_irq(&stopper1->lock);
+       raw_spin_unlock(&stopper2->lock);
+       raw_spin_unlock_irq(&stopper1->lock);
 
        if (unlikely(err == -EDEADLK)) {
                while (stop_cpus_in_progress)
@@ -461,9 +461,9 @@ static int cpu_stop_should_run(unsigned int cpu)
        unsigned long flags;
        int run;
 
-       spin_lock_irqsave(&stopper->lock, flags);
+       raw_spin_lock_irqsave(&stopper->lock, flags);
        run = !list_empty(&stopper->works);
-       spin_unlock_irqrestore(&stopper->lock, flags);
+       raw_spin_unlock_irqrestore(&stopper->lock, flags);
        return run;
 }
 
@@ -474,13 +474,13 @@ static void cpu_stopper_thread(unsigned int cpu)
 
 repeat:
        work = NULL;
-       spin_lock_irq(&stopper->lock);
+       raw_spin_lock_irq(&stopper->lock);
        if (!list_empty(&stopper->works)) {
                work = list_first_entry(&stopper->works,
                                        struct cpu_stop_work, list);
                list_del_init(&work->list);
        }
-       spin_unlock_irq(&stopper->lock);
+       raw_spin_unlock_irq(&stopper->lock);
 
        if (work) {
                cpu_stop_fn_t fn = work->fn;
@@ -554,7 +554,7 @@ static int __init cpu_stop_init(void)
        for_each_possible_cpu(cpu) {
                struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 
-               spin_lock_init(&stopper->lock);
+               raw_spin_lock_init(&stopper->lock);
                INIT_LIST_HEAD(&stopper->works);
        }
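
For reference, the double-lock sequence in cpu_stop_queue_two_works()
above follows this shape; a sketch with hypothetical locks lock_a and
lock_b standing in for the two per-CPU stopper locks:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(lock_a);	/* hypothetical: stopper1->lock */
static DEFINE_RAW_SPINLOCK(lock_b);	/* hypothetical: stopper2->lock */

static void queue_two_sketch(void)
{
	/* The outer lock also disables interrupts for the whole section */
	raw_spin_lock_irq(&lock_a);
	/*
	 * The real per-CPU stopper locks belong to one lock class, so
	 * the inner acquisition is annotated with SINGLE_DEPTH_NESTING
	 * to tell lockdep it is an ordered second lock of the same
	 * class, not a self-deadlock.
	 */
	raw_spin_lock_nested(&lock_b, SINGLE_DEPTH_NESTING);

	/* ... queue one cpu_stop_work on each CPU's stopper ... */

	raw_spin_unlock(&lock_b);
	raw_spin_unlock_irq(&lock_a);
}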