From 2c104ea68350b7f49c4ae207afa6e7f7f5c81546 Mon Sep 17 00:00:00 2001
From: Erez Shitrit <erezsh@mellanox.com>
Date: Thu, 19 Oct 2017 07:56:42 +0300
Subject: [PATCH] IB/ipoib: Get rid of the tx_outstanding variable in all modes

The first step toward using NAPI in the UD/TX flow is to separate the
two flows, NAPI and xmit, so that no variables are shared between them.

This patch removes the tx_outstanding variable that was used in both
flows. Instead, the driver uses the two cyclic ring variables tx_head
and tx_tail: tx_head is updated in the xmit flow, tx_tail in the NAPI
flow, and the number of outstanding sends is their difference.

Cc: Kamal Heib <kamalh@mellanox.com>
Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
Reviewed-by: Alex Vesker <valex@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
---
 drivers/infiniband/ulp/ipoib/ipoib.h    |  1 -
 drivers/infiniband/ulp/ipoib/ipoib_cm.c | 10 ++++++----
 drivers/infiniband/ulp/ipoib/ipoib_ib.c | 10 ++++------
 3 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 7cc2b75..19c3ba2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -381,7 +381,6 @@ struct ipoib_dev_priv {
 	unsigned	     tx_tail;
 	struct ib_sge	     tx_sge[MAX_SKB_FRAGS + 1];
 	struct ib_ud_wr      tx_wr;
-	unsigned	     tx_outstanding;
 	struct ib_wc	     send_wc[MAX_SEND_CQE];
 
 	struct ib_recv_wr    rx_wr;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7500c28..6e0fc59 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -769,8 +769,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	} else {
 		netif_trans_update(dev);
 		++tx->tx_head;
-
-		if (++priv->tx_outstanding == ipoib_sendq_size) {
+		++priv->tx_head;
+		if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size) {
 			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
 				  tx->qp->qp_num);
 			netif_stop_queue(dev);
@@ -814,7 +814,8 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	netif_tx_lock(dev);
 
 	++tx->tx_tail;
-	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+	++priv->tx_tail;
+	if (unlikely((priv->tx_head - priv->tx_tail) == ipoib_sendq_size >> 1) &&
 	    netif_queue_stopped(dev) &&
 	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		netif_wake_queue(dev);
@@ -1220,8 +1221,9 @@ timeout:
 		ipoib_dma_unmap_tx(priv, tx_req);
 		dev_kfree_skb_any(tx_req->skb);
 		++p->tx_tail;
+		++priv->tx_tail;
 		netif_tx_lock_bh(p->dev);
-		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+		if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
 		    netif_queue_stopped(p->dev) &&
 		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 			netif_wake_queue(p->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 89d82e2..c978f8f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -406,7 +406,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	dev_kfree_skb_any(tx_req->skb);
 
 	++priv->tx_tail;
-	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+	if (unlikely((priv->tx_head - priv->tx_tail) == ipoib_sendq_size >> 1) &&
 	    netif_queue_stopped(dev) &&
 	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		netif_wake_queue(dev);
@@ -611,8 +611,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
 	else
 		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
-
-	if (++priv->tx_outstanding == ipoib_sendq_size) {
+	/* increase the tx_head after send success, but use it for queue state */
+	if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
 		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
 		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
 			ipoib_warn(priv, "request notify on send CQ failed\n");
@@ -627,7 +627,6 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	if (unlikely(rc)) {
 		ipoib_warn(priv, "post_send failed, error %d\n", rc);
 		++dev->stats.tx_errors;
-		--priv->tx_outstanding;
 		ipoib_dma_unmap_tx(priv, tx_req);
 		dev_kfree_skb_any(skb);
 		if (netif_queue_stopped(dev))
@@ -640,7 +639,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		++priv->tx_head;
 	}
 
-	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
+	if (unlikely(priv->tx_head - priv->tx_tail > MAX_SEND_CQE))
 		while (poll_tx(priv))
 			; /* nothing */
 
@@ -773,7 +772,6 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
 			ipoib_dma_unmap_tx(priv, tx_req);
 			dev_kfree_skb_any(tx_req->skb);
 			++priv->tx_tail;
-			--priv->tx_outstanding;
 		}
 
 		for (i = 0; i < ipoib_recvq_size; ++i) {
-- 
2.7.4
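
A note on the idiom for readers following along: tx_head and tx_tail are
free-running unsigned counters, each written by exactly one flow, so the
number of outstanding sends is always tx_head - tx_tail. Unsigned
arithmetic in C is defined modulo 2^N, which keeps that difference
correct even after the counters wrap past UINT_MAX. Below is a minimal
standalone sketch of the idiom (not from the driver; SENDQ_SIZE,
tx_outstanding() and the simulation in main() are illustrative
stand-ins):

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define SENDQ_SIZE 128u			/* stand-in for ipoib_sendq_size */

static unsigned tx_head;		/* advanced only by the xmit flow */
static unsigned tx_tail;		/* advanced only by the completion flow */

/* Wraparound-safe in-flight count: unsigned subtraction is modulo 2^N. */
static unsigned tx_outstanding(void)
{
	return tx_head - tx_tail;
}

int main(void)
{
	/* Start both counters just below UINT_MAX to force a wrap. */
	tx_head = UINT_MAX - 2;
	tx_tail = UINT_MAX - 2;

	for (int i = 0; i < 10; i++) {	/* post 10 sends; tx_head wraps here */
		++tx_head;
		if (tx_outstanding() == SENDQ_SIZE)
			printf("ring full, would stop the net queue\n");
	}
	for (int i = 0; i < 4; i++)	/* complete 4 of them */
		++tx_tail;

	assert(tx_outstanding() == 6);	/* still correct across the wrap */
	printf("outstanding = %u\n", tx_outstanding());
	return 0;
}

Because each counter has a single writer, the xmit path and the NAPI
completion path never do a read-modify-write on a shared counter, which
is exactly what dropping tx_outstanding buys the driver.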