staging: qlge: Remove rx_ring.sbq_buf_size
authorBenjamin Poirier <bpoirier@suse.com>
Fri, 27 Sep 2019 10:12:00 +0000 (19:12 +0900)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 4 Oct 2019 15:17:45 +0000 (17:17 +0200)
Tx completion rings have sbq_buf_size = 0, but there is no case where the
code actually tests that value. We can remove sbq_buf_size and use a
constant instead.

Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://lore.kernel.org/r/20190927101210.23856-7-bpoirier@suse.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/qlge/qlge.h
drivers/staging/qlge/qlge_dbg.c
drivers/staging/qlge/qlge_main.c

index f32da8c..a3a52bb 100644 (file)
@@ -1447,7 +1447,6 @@ struct rx_ring {
        /* Small buffer queue elements. */
        u32 sbq_len;            /* entry count */
        u32 sbq_size;           /* size in bytes of queue */
-       u32 sbq_buf_size;
        void *sbq_base;
        dma_addr_t sbq_base_dma;
        void *sbq_base_indirect;
index 718943e..9f3f1b0 100644 (file)
@@ -1791,7 +1791,6 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
        pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
        pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
        pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
-       pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
        pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
        pr_err("rx_ring->irq = %d\n", rx_ring->irq);
        pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
index 34bc1d9..0a3809c 100644 (file)
@@ -1164,7 +1164,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
                                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
                                map = pci_map_single(qdev->pdev,
                                                     sbq_desc->p.skb->data,
-                                                    rx_ring->sbq_buf_size,
+                                                    SMALL_BUF_MAP_SIZE,
                                                     PCI_DMA_FROMDEVICE);
                                if (pci_dma_mapping_error(qdev->pdev, map)) {
                                        netif_err(qdev, ifup, qdev->ndev,
@@ -1594,14 +1594,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 
        pci_dma_sync_single_for_cpu(qdev->pdev,
                                    dma_unmap_addr(sbq_desc, mapaddr),
-                                   rx_ring->sbq_buf_size,
-                                   PCI_DMA_FROMDEVICE);
+                                   SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
 
        skb_put_data(new_skb, skb->data, length);
 
        pci_dma_sync_single_for_device(qdev->pdev,
                                       dma_unmap_addr(sbq_desc, mapaddr),
-                                      rx_ring->sbq_buf_size,
+                                      SMALL_BUF_MAP_SIZE,
                                       PCI_DMA_FROMDEVICE);
        skb = new_skb;
 
@@ -1723,7 +1722,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                sbq_desc = ql_get_curr_sbuf(rx_ring);
                pci_unmap_single(qdev->pdev,
                                dma_unmap_addr(sbq_desc, mapaddr),
-                               rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE);
+                               SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
                skb = sbq_desc->p.skb;
                ql_realign_skb(skb, hdr_len);
                skb_put(skb, hdr_len);
@@ -1755,13 +1754,13 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                        pci_dma_sync_single_for_cpu(qdev->pdev,
                                                    dma_unmap_addr(sbq_desc,
                                                                   mapaddr),
-                                                   rx_ring->sbq_buf_size,
+                                                   SMALL_BUF_MAP_SIZE,
                                                    PCI_DMA_FROMDEVICE);
                        skb_put_data(skb, sbq_desc->p.skb->data, length);
                        pci_dma_sync_single_for_device(qdev->pdev,
                                                       dma_unmap_addr(sbq_desc,
                                                                      mapaddr),
-                                                      rx_ring->sbq_buf_size,
+                                                      SMALL_BUF_MAP_SIZE,
                                                       PCI_DMA_FROMDEVICE);
                } else {
                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1773,7 +1772,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                        skb_put(skb, length);
                        pci_unmap_single(qdev->pdev,
                                         dma_unmap_addr(sbq_desc, mapaddr),
-                                        rx_ring->sbq_buf_size,
+                                        SMALL_BUF_MAP_SIZE,
                                         PCI_DMA_FROMDEVICE);
                        sbq_desc->p.skb = NULL;
                }
@@ -1846,7 +1845,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                sbq_desc = ql_get_curr_sbuf(rx_ring);
                pci_unmap_single(qdev->pdev,
                                 dma_unmap_addr(sbq_desc, mapaddr),
-                                rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE);
+                                SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
                if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
                        /*
                         * This is an non TCP/UDP IP frame, so
@@ -2807,7 +2806,7 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
                if (sbq_desc->p.skb) {
                        pci_unmap_single(qdev->pdev,
                                         dma_unmap_addr(sbq_desc, mapaddr),
-                                        rx_ring->sbq_buf_size,
+                                        SMALL_BUF_MAP_SIZE,
                                         PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(sbq_desc->p.skb);
                        sbq_desc->p.skb = NULL;
@@ -3158,8 +3157,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
                } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
                cqicb->sbq_addr =
                    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
-               cqicb->sbq_buf_size =
-                   cpu_to_le16((u16)(rx_ring->sbq_buf_size));
+               cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUF_MAP_SIZE);
                bq_len = (rx_ring->sbq_len == 65536) ? 0 :
                        (u16) rx_ring->sbq_len;
                cqicb->sbq_len = cpu_to_le16(bq_len);
@@ -4109,7 +4107,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
                        rx_ring->sbq_len = NUM_SMALL_BUFFERS;
                        rx_ring->sbq_size =
                            rx_ring->sbq_len * sizeof(__le64);
-                       rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
                        rx_ring->type = RX_Q;
                } else {
                        /*
@@ -4123,7 +4120,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
                        rx_ring->lbq_size = 0;
                        rx_ring->sbq_len = 0;
                        rx_ring->sbq_size = 0;
-                       rx_ring->sbq_buf_size = 0;
                        rx_ring->type = TX_Q;
                }
        }