bus: mhi: host: Add spinlock to protect WP access when queueing TREs
authorBhaumik Bhatt <bbhatt@codeaurora.org>
Mon, 11 Dec 2023 06:42:51 +0000 (14:42 +0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 1 Feb 2024 00:18:52 +0000 (16:18 -0800)
commit b89b6a863dd53bc70d8e52d50f9cfaef8ef5e9c9 upstream.

Protect WP accesses such that multiple threads queueing buffers for
incoming data do not race.

Meanwhile, if CONFIG_TRACE_IRQFLAGS is enabled, IRQs will be enabled once
__local_bh_enable_ip() is called as part of write_unlock_bh(). Hence, take
the irqsave lock only after the TRE is generated, so that write_unlock_bh()
is never run while the irqsave lock is held.

Cc: stable@vger.kernel.org
Fixes: 189ff97cca53 ("bus: mhi: core: Add support for data transfer")
Signed-off-by: Bhaumik Bhatt <bbhatt@codeaurora.org>
Signed-off-by: Qiang Yu <quic_qianyu@quicinc.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Tested-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Link: https://lore.kernel.org/r/1702276972-41296-2-git-send-email-quic_qianyu@quicinc.com
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/bus/mhi/host/main.c

index 25a4745..d6653cb 100644 (file)
@@ -1127,17 +1127,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
        if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
                return -EIO;
 
-       read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
        ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
-       if (unlikely(ret)) {
-               ret = -EAGAIN;
-               goto exit_unlock;
-       }
+       if (unlikely(ret))
+               return -EAGAIN;
 
        ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
        if (unlikely(ret))
-               goto exit_unlock;
+               return ret;
+
+       read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
 
        /* Packet is queued, take a usage ref to exit M3 if necessary
         * for host->device buffer, balanced put is done on buffer completion
@@ -1157,7 +1155,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
        if (dir == DMA_FROM_DEVICE)
                mhi_cntrl->runtime_put(mhi_cntrl);
 
-exit_unlock:
        read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
 
        return ret;
@@ -1209,6 +1206,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
        int eot, eob, chain, bei;
        int ret;
 
+       /* Protect accesses for reading and incrementing WP */
+       write_lock_bh(&mhi_chan->lock);
+
        buf_ring = &mhi_chan->buf_ring;
        tre_ring = &mhi_chan->tre_ring;
 
@@ -1226,8 +1226,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 
        if (!info->pre_mapped) {
                ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
-               if (ret)
+               if (ret) {
+                       write_unlock_bh(&mhi_chan->lock);
                        return ret;
+               }
        }
 
        eob = !!(flags & MHI_EOB);
@@ -1244,6 +1246,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
        mhi_add_ring_element(mhi_cntrl, tre_ring);
        mhi_add_ring_element(mhi_cntrl, buf_ring);
 
+       write_unlock_bh(&mhi_chan->lock);
+
        return 0;
 }