drivers/net: chelsio/cxgb*: Convert timers to use timer_setup()
author	Kees Cook <keescook@chromium.org>	Fri, 27 Oct 2017 05:54:53 +0000 (22:54 -0700)
committer	David S. Miller <davem@davemloft.net>	Sat, 28 Oct 2017 10:09:49 +0000 (19:09 +0900)
In preparation for unconditionally passing the struct timer_list pointer to
all timer callbacks, switch to using the new timer_setup() and from_timer()
to pass the timer pointer explicitly.

Cc: Santosh Raspatur <santosh@chelsio.com>
Cc: Ganesh Goudar <ganeshgr@chelsio.com>
Cc: Casey Leedom <leedom@chelsio.com>
Cc: netdev@vger.kernel.org
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
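
For reference, a minimal sketch of the conversion pattern this patch applies. It is an illustration only: struct foo, foo_poll() and the one-second period are hypothetical and do not appear in these drivers.

/*
 * Old style: the callback took an opaque unsigned long and cast it back:
 *
 *	static void foo_poll(unsigned long data)
 *	{
 *		struct foo *f = (struct foo *)data;
 *		...
 *	}
 *	setup_timer(&f->poll_timer, foo_poll, (unsigned long)f);
 *
 * New style: the callback receives the timer pointer and recovers its
 * container with from_timer(), a container_of() wrapper keyed on the
 * name of the timer_list member.
 */
#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo {
	struct timer_list poll_timer;
	unsigned long polls;
};

static void foo_poll(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, poll_timer);

	f->polls++;
	mod_timer(&f->poll_timer, jiffies + HZ);	/* re-arm, 1s period */
}

static void foo_init(struct foo *f)
{
	/* Flags (e.g. TIMER_DEFERRABLE) go in the third argument; 0 keeps
	 * the default behaviour, matching the old setup_timer() calls.
	 */
	timer_setup(&f->poll_timer, foo_poll, 0);
	mod_timer(&f->poll_timer, jiffies + HZ);
}
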
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c

diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index e2d3426..e3d28ae 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2853,9 +2853,9 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
  *     bother cleaning them up here.
  *
  */
-static void sge_timer_tx(unsigned long data)
+static void sge_timer_tx(struct timer_list *t)
 {
-       struct sge_qset *qs = (struct sge_qset *)data;
+       struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
        struct port_info *pi = netdev_priv(qs->netdev);
        struct adapter *adap = pi->adapter;
        unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
@@ -2893,10 +2893,10 @@ static void sge_timer_tx(unsigned long data)
  *     starved.
  *
  */
-static void sge_timer_rx(unsigned long data)
+static void sge_timer_rx(struct timer_list *t)
 {
        spinlock_t *lock;
-       struct sge_qset *qs = (struct sge_qset *)data;
+       struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
        struct port_info *pi = netdev_priv(qs->netdev);
        struct adapter *adap = pi->adapter;
        u32 status;
@@ -2976,8 +2976,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
        struct sge_qset *q = &adapter->sge.qs[id];
 
        init_qset_cntxt(q, id);
-       setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
-       setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
+       timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0);
+       timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0);
 
        q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
                                   sizeof(struct rx_desc),
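
The cxgb3 queue set above embeds two timers in one structure, and each callback recovers the same container through a different member name. A sketch of that shape, with hypothetical names (struct qset_example, tx_cb(), rx_cb()):

#include <linux/timer.h>

struct qset_example {
	struct timer_list tx_timer;
	struct timer_list rx_timer;
	unsigned int tx_events, rx_events;
};

static void tx_cb(struct timer_list *t)
{
	/* Expands to container_of(t, struct qset_example, tx_timer). */
	struct qset_example *q = from_timer(q, t, tx_timer);

	q->tx_events++;
}

static void rx_cb(struct timer_list *t)
{
	/* Same container type, resolved through the rx_timer member. */
	struct qset_example *q = from_timer(q, t, rx_timer);

	q->rx_events++;
}

static void qset_example_init(struct qset_example *q)
{
	timer_setup(&q->tx_timer, tx_cb, 0);
	timer_setup(&q->rx_timer, rx_cb, 0);
}
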
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 9b6aabe..614db01 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -741,9 +741,9 @@ err:
        return ret;
 }
 
-static void ch_flower_stats_cb(unsigned long data)
+static void ch_flower_stats_cb(struct timer_list *t)
 {
-       struct adapter *adap = (struct adapter *)data;
+       struct adapter *adap = from_timer(adap, t, flower_stats_timer);
        struct ch_tc_flower_entry *flower_entry;
        struct ch_tc_flower_stats *ofld_stats;
        unsigned int i;
@@ -815,8 +815,7 @@ err:
 void cxgb4_init_tc_flower(struct adapter *adap)
 {
        hash_init(adap->flower_anymatch_tbl);
-       setup_timer(&adap->flower_stats_timer, ch_flower_stats_cb,
-                   (unsigned long)adap);
+       timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
        mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 4ef68f6..486b01f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2583,11 +2583,11 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
        return t4_intr_intx;
 }
 
-static void sge_rx_timer_cb(unsigned long data)
+static void sge_rx_timer_cb(struct timer_list *t)
 {
        unsigned long m;
        unsigned int i;
-       struct adapter *adap = (struct adapter *)data;
+       struct adapter *adap = from_timer(adap, t, sge.rx_timer);
        struct sge *s = &adap->sge;
 
        for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
@@ -2620,11 +2620,11 @@ done:
        mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
 }
 
-static void sge_tx_timer_cb(unsigned long data)
+static void sge_tx_timer_cb(struct timer_list *t)
 {
        unsigned long m;
        unsigned int i, budget;
-       struct adapter *adap = (struct adapter *)data;
+       struct adapter *adap = from_timer(adap, t, sge.tx_timer);
        struct sge *s = &adap->sge;
 
        for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
@@ -3458,8 +3458,8 @@ int t4_sge_init(struct adapter *adap)
        /* Set up timers used for recuring callbacks to process RX and TX
         * administrative tasks.
         */
-       setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
-       setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
+       timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
+       timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
 
        spin_lock_init(&s->intrq_lock);
 
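In the cxgb4 hunks above the timers are embedded in struct sge, not directly in struct adapter, so from_timer() is passed the full member path (sge.rx_timer, sge.tx_timer). A sketch of that nested case, again with hypothetical names:

#include <linux/timer.h>

struct inner_example {			/* stands in for struct sge */
	struct timer_list poll_timer;
};

struct outer_example {			/* stands in for struct adapter */
	struct inner_example inner;
	unsigned long ticks;
};

static void outer_poll(struct timer_list *t)
{
	/* container_of()/offsetof() accept a nested member path, so the
	 * callback recovers the outer structure in one step.
	 */
	struct outer_example *o = from_timer(o, t, inner.poll_timer);

	o->ticks++;
}

static void outer_example_init(struct outer_example *o)
{
	timer_setup(&o->inner.poll_timer, outer_poll, 0);
}
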
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 05498e7..14d7e67 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2058,9 +2058,9 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter)
  *     when out of memory a queue can become empty.  We schedule NAPI to do
  *     the actual refill.
  */
-static void sge_rx_timer_cb(unsigned long data)
+static void sge_rx_timer_cb(struct timer_list *t)
 {
-       struct adapter *adapter = (struct adapter *)data;
+       struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
        struct sge *s = &adapter->sge;
        unsigned int i;
 
@@ -2117,9 +2117,9 @@ static void sge_rx_timer_cb(unsigned long data)
  *     when no new packets are being submitted.  This is essential for pktgen,
  *     at least.
  */
-static void sge_tx_timer_cb(unsigned long data)
+static void sge_tx_timer_cb(struct timer_list *t)
 {
-       struct adapter *adapter = (struct adapter *)data;
+       struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
        struct sge *s = &adapter->sge;
        unsigned int i, budget;
 
@@ -2676,8 +2676,8 @@ int t4vf_sge_init(struct adapter *adapter)
        /*
         * Set up tasklet timers.
         */
-       setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
-       setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
+       timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
+       timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
 
        /*
         * Initialize Forwarded Interrupt Queue lock.