spi: Lock controller idling transition inside the io_mutex
author: David Jander <david@protonic.nl>
Tue, 21 Jun 2022 06:12:26 +0000 (08:12 +0200)
committer: Mark Brown <broonie@kernel.org>
Mon, 27 Jun 2022 12:27:18 +0000 (13:27 +0100)
This way, the spi sync path does not need to deal with the idling
transition.

Signed-off-by: David Jander <david@protonic.nl>
Link: https://lore.kernel.org/r/20220621061234.3626638-4-david@protonic.nl
Signed-off-by: Mark Brown <broonie@kernel.org>
drivers/spi/spi.c

index 2d057d0..cfff2ff 100644 (file)
@@ -1643,27 +1643,30 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
        unsigned long flags;
        int ret;
 
+       /* Take the IO mutex */
+       mutex_lock(&ctlr->io_mutex);
+
        /* Lock queue */
        spin_lock_irqsave(&ctlr->queue_lock, flags);
 
        /* Make sure we are not already running a message */
        if (ctlr->cur_msg) {
                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-               return;
+               goto out_unlock;
        }
 
        /* If another context is idling the device then defer */
        if (ctlr->idling) {
                kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-               return;
+               goto out_unlock;
        }
 
        /* Check if the queue is idle */
        if (list_empty(&ctlr->queue) || !ctlr->running) {
                if (!ctlr->busy) {
                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-                       return;
+                       goto out_unlock;
                }
 
                /* Defer any non-atomic teardown to the thread */
@@ -1679,7 +1682,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
                                                   &ctlr->pump_messages);
                        }
                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-                       return;
+                       goto out_unlock;
                }
 
                ctlr->busy = false;
@@ -1701,7 +1704,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
                ctlr->idling = false;
                ctlr->queue_empty = true;
                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-               return;
+               goto out_unlock;
        }
 
        /* Extract head of queue */
@@ -1715,13 +1718,16 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
                ctlr->busy = true;
        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
-       mutex_lock(&ctlr->io_mutex);
        ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
        mutex_unlock(&ctlr->io_mutex);
 
        /* Prod the scheduler in case transfer_one() was busy waiting */
        if (!ret)
                cond_resched();
+       return;
+
+out_unlock:
+       mutex_unlock(&ctlr->io_mutex);
 }
 
 /**