IB/hfi1: Close PSM sdma_progress sleep window
Author:     Mike Marciniszyn <mike.marciniszyn@intel.com>
AuthorDate: Mon, 24 Jun 2019 20:19:43 +0000 (16:19 -0400)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Wed, 3 Jul 2019 11:14:41 +0000 (13:14 +0200)
commit da9de5f8527f4b9efc82f967d29a583318c034c7 upstream.

sdma_progress() is called outside the wait lock.

This opens a race window in which sdma_progress() can return false and
the sdma_engine can then go idle before the request is queued on the
wait list.  If that happens, no further sdma interrupts arrive to
provide the wakeup, and the user_sdma xmit hangs.

Fix by moving the lock to enclose the sdma_progress() call.
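
To make the window concrete, the sketch below reproduces the same
lost-wakeup pattern in plain pthreads.  This is a hypothetical userspace
analogue, not driver code: the mutex stands in for iowait_lock, the
semaphore for the sdma interrupt's wakeup, and engine_idle for
"sdma_progress() will keep returning false".  defer_buggy() checks for
progress outside the lock, like the old defer_packet_queue();
defer_fixed() takes the lock first, as the patch below does.

/* Minimal lost-wakeup demo (hypothetical names).
 * Build with: gcc -pthread demo.c
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool engine_idle;	/* set once when the "engine" drains */
static bool waiter_queued;	/* one-entry stand-in for dmawait    */
static sem_t wakeup;		/* the "interrupt" wakeup            */

/* The engine finishes its last descriptor, then wakes whoever is
 * queued -- once.  After this there are no further wakeups.
 */
static void *engine(void *arg)
{
	(void)arg;
	usleep(1000);
	pthread_mutex_lock(&lock);
	engine_idle = true;
	if (waiter_queued)		/* empty list: nobody to wake */
		sem_post(&wakeup);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Old pattern: the progress check happens outside the lock. */
static int defer_buggy(void)
{
	if (engine_idle)		/* (1) reads false...           */
		return -1;		/* "-EAGAIN": caller retries    */
	usleep(2000);			/* (2) widen the window: engine */
					/* idles and "interrupts" here  */
	pthread_mutex_lock(&lock);
	waiter_queued = true;		/* (3) queued too late          */
	pthread_mutex_unlock(&lock);
	sem_wait(&wakeup);		/* hangs forever                */
	return 0;
}

/* Patched pattern: check and enqueue under the same lock, so the
 * engine cannot slip idle between (1) and (3).
 */
static int defer_fixed(void)
{
	pthread_mutex_lock(&lock);
	if (engine_idle) {
		pthread_mutex_unlock(&lock);
		return -1;		/* "-EAGAIN": no sleep needed   */
	}
	waiter_queued = true;
	pthread_mutex_unlock(&lock);
	sem_wait(&wakeup);		/* wakeup can no longer be lost */
	return 0;
}

int main(void)
{
	pthread_t t;

	sem_init(&wakeup, 0, 0);
	pthread_create(&t, NULL, engine, NULL);
	if (defer_fixed() == 0)		/* swap in defer_buggy() to     */
		puts("woken up");	/* see the hang                 */
	pthread_join(t, NULL);
	return 0;
}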

Also, delete busycount. The need for this was removed by:
commit bcad29137a97 ("IB/hfi1: Serve the most starved iowait entry first")

Ported to linux-4.19.y.

Cc: <stable@vger.kernel.org>
Fixes: 7724105686e7 ("IB/hfi1: add driver files")
Reviewed-by: Gary Leshner <Gary.S.Leshner@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/infiniband/hw/hfi1/user_sdma.c | 12 ++++--------
 drivers/infiniband/hw/hfi1/user_sdma.h |  1 -
 2 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 51831bf..cbff746 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -132,25 +132,22 @@ static int defer_packet_queue(
        struct hfi1_user_sdma_pkt_q *pq =
                container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
        struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
-       struct user_sdma_txreq *tx =
-               container_of(txreq, struct user_sdma_txreq, txreq);
 
-       if (sdma_progress(sde, seq, txreq)) {
-               if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
-                       goto eagain;
-       }
+       write_seqlock(&dev->iowait_lock);
+       if (sdma_progress(sde, seq, txreq))
+               goto eagain;
        /*
         * We are assuming that if the list is enqueued somewhere, it
         * is to the dmawait list since that is the only place where
         * it is supposed to be enqueued.
         */
        xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
-       write_seqlock(&dev->iowait_lock);
        if (list_empty(&pq->busy.list))
                iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
        write_sequnlock(&dev->iowait_lock);
        return -EBUSY;
 eagain:
+       write_sequnlock(&dev->iowait_lock);
        return -EAGAIN;
 }
 
@@ -803,7 +800,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 
                tx->flags = 0;
                tx->req = req;
-               tx->busycount = 0;
                INIT_LIST_HEAD(&tx->list);
 
                /*
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index 91c343f..2c05670 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -245,7 +245,6 @@ struct user_sdma_txreq {
        struct list_head list;
        struct user_sdma_request *req;
        u16 flags;
-       unsigned int busycount;
        u64 seqnum;
 };
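
For reference, piecing the first hunk above back together, the patched
defer_packet_queue() reads as follows (the parameter list lies outside
the hunk and is elided here):

static int defer_packet_queue(/* parameters elided: outside the hunk */)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;

	write_seqlock(&dev->iowait_lock);
	if (sdma_progress(sde, seq, txreq))
		goto eagain;
	/*
	 * We are assuming that if the list is enqueued somewhere, it
	 * is to the dmawait list since that is the only place where
	 * it is supposed to be enqueued.
	 */
	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
	if (list_empty(&pq->busy.list))
		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
	write_sequnlock(&dev->iowait_lock);
	return -EBUSY;
eagain:
	write_sequnlock(&dev->iowait_lock);
	return -EAGAIN;
}

Note that iowait_lock is now taken before sdma_progress() rather than
after it, and both the -EBUSY and -EAGAIN paths drop the lock; that
reordering is the whole of the fix.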