can: rx-offload: can_rx_offload_threaded_irq_finish(): add new function to be called...
author Marc Kleine-Budde <mkl@pengutronix.de>
Mon, 10 May 2021 20:51:39 +0000 (22:51 +0200)
committer Marc Kleine-Budde <mkl@pengutronix.de>
Sun, 25 Jul 2021 09:36:25 +0000 (11:36 +0200)
After reading all CAN frames from the controller in the IRQ handler
and storing them into a skb_queue, the driver calls napi_schedule().
In the napi poll function, the skbs from the skb_queue are then pushed
into the networking stack.
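
A minimal sketch of that flow on the driver side (struct demo_priv and the
demo_*() helpers are made up for illustration and stubbed out; only the
alloc_can_skb() and can_rx_offload_*() calls are real API):

| #include <linux/can/dev.h>
| #include <linux/can/rx-offload.h>
| #include <linux/interrupt.h>
|
| struct demo_priv {
| 	struct can_rx_offload offload;
| 	struct net_device *ndev;
| 	/* controller specific state ... */
| };
|
| /* Hypothetical controller accessors, stubbed out for the sketch: */
| static bool demo_frame_pending(struct demo_priv *priv) { return false; }
| static void demo_read_frame(struct demo_priv *priv, struct can_frame *cf) { }
|
| static irqreturn_t demo_threaded_isr(int irq, void *dev_id)
| {
| 	struct demo_priv *priv = dev_id;
| 	struct can_frame *cf;
| 	struct sk_buff *skb;
|
| 	/* Read all pending frames from the controller and queue them
| 	 * via rx-offload. */
| 	while (demo_frame_pending(priv)) {
| 		skb = alloc_can_skb(priv->ndev, &cf);
| 		if (!skb)
| 			break;
| 		demo_read_frame(priv, cf);
| 		can_rx_offload_queue_tail(&priv->offload, skb);
| 	}
|
| 	/* Hand the queued skbs over to NAPI, i.e. call napi_schedule(). */
| 	can_rx_offload_irq_finish(&priv->offload);
|
| 	return IRQ_HANDLED;
| }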

However, if napi_schedule() is called from a threaded IRQ handler, this
triggers the following error:

| NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #08!!!

To avoid this, create a new rx-offload function,
can_rx_offload_threaded_irq_finish(), which wraps the napi_schedule()
call in local_bh_disable()/local_bh_enable().
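
The pairing helps because napi_schedule() only raises the NET_RX
softirq; raised from a threaded handler (i.e. process context), nothing
runs that softirq before the handler returns, which is what the NOHZ
code warns about. Annotated, the core of the new helper (see the
rx-offload.c hunk below) does:

| local_bh_disable();            /* defer softirq processing */
| napi_schedule(&offload->napi); /* raises NET_RX_SOFTIRQ */
| local_bh_enable();             /* runs the now-pending softirq right
|                                 * here, in the threaded handler */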

Convert all drivers that call can_rx_offload_irq_finish() from
threaded IRQ context to can_rx_offload_threaded_irq_finish().

Link: https://lore.kernel.org/r/20210724204745.736053-4-mkl@pengutronix.de
Suggested-by: Daniel Glöckner <dg@emlix.com>
Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
drivers/net/can/dev/rx-offload.c
drivers/net/can/m_can/m_can.c
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
include/linux/can/rx-offload.h

diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index 82ade3aa5c136162453fb7fad29e9fef37bcad1d..37b0cc65237b7ea5d5ca62152a29dd19ce2d5c6f 100644
@@ -299,6 +299,29 @@ void can_rx_offload_irq_finish(struct can_rx_offload *offload)
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);
 
+void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
+{
+       unsigned long flags;
+       int queue_len;
+
+       if (skb_queue_empty_lockless(&offload->skb_irq_queue))
+               return;
+
+       spin_lock_irqsave(&offload->skb_queue.lock, flags);
+       skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
+       spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+       queue_len = skb_queue_len(&offload->skb_queue);
+       if (queue_len > offload->skb_queue_len_max / 8)
+               netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+                          __func__, queue_len);
+
+       local_bh_disable();
+       napi_schedule(&offload->napi);
+       local_bh_enable();
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish);
+
 static int can_rx_offload_init_queue(struct net_device *dev,
                                     struct can_rx_offload *offload,
                                     unsigned int weight)
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 18461982f7a13910ad7d825c97bd8a92956c12df..317cdc98c53919e0f1bde54239e35e9f581422ca 100644
@@ -1059,7 +1059,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
        }
 
        if (cdev->is_peripheral)
-               can_rx_offload_irq_finish(&cdev->offload);
+               can_rx_offload_threaded_irq_finish(&cdev->offload);
 
        return IRQ_HANDLED;
 }
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index f3b267ec22e025a9c3b7315f70c5ced9bcbf3395..6962ab2749df3b0e09818de6468f7067772a1330 100644
@@ -2196,7 +2196,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
                                  priv->regs_status.intf);
 
                if (!(intf_pending)) {
-                       can_rx_offload_irq_finish(&priv->offload);
+                       can_rx_offload_threaded_irq_finish(&priv->offload);
                        return handled;
                }
 
@@ -2298,7 +2298,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
        } while (1);
 
  out_fail:
-       can_rx_offload_irq_finish(&priv->offload);
+       can_rx_offload_threaded_irq_finish(&priv->offload);
 
        netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
                   err, priv->regs_status.intf);
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index 516f64df0ebc0cf409c52827abf5a6664bdc7656..c11477620403c751d8215ae522852409d7e6bb8f 100644
@@ -50,6 +50,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
 int can_rx_offload_queue_tail(struct can_rx_offload *offload,
                              struct sk_buff *skb);
 void can_rx_offload_irq_finish(struct can_rx_offload *offload);
+void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload);
 void can_rx_offload_del(struct can_rx_offload *offload);
 void can_rx_offload_enable(struct can_rx_offload *offload);