}
}
+/* For peripherals, pass skb to rx-offload, which will push skb from
+ * napi. For non-peripherals, RX is done in napi already, so push
+ * directly. timestamp is used to ensure correct skb ordering in
+ * rx-offload and is ignored for non-peripherals.
+ */
+static void m_can_receive_skb(struct m_can_classdev *cdev,
+ struct sk_buff *skb,
+ u32 timestamp)
+{
+ if (cdev->is_peripheral)
+ can_rx_offload_queue_sorted(&cdev->offload, skb, timestamp);
+ else
+ netif_receive_skb(skb);
+}
+
static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
struct net_device_stats *stats = &dev->stats;
struct canfd_frame *cf;
struct sk_buff *skb;
u32 id, fgi, dlc;
+ u32 timestamp = 0;
int i;
/* calculate the fifo get index for where to read data */
stats->rx_packets++;
stats->rx_bytes += cf->len;
- netif_receive_skb(skb);
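+ /* the FIFO word holding the DLC also carries the hardware RX timestamp */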
+ timestamp = FIELD_GET(RX_BUF_RXTS_MASK, dlc);
+
+ m_can_receive_skb(cdev, skb, timestamp);
}
static int m_can_do_rx_poll(struct net_device *dev, int quota)
static int m_can_handle_lost_msg(struct net_device *dev)
{
+ struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
struct can_frame *frame;
+ u32 timestamp = 0;
netdev_err(dev, "msg lost in rxf0\n");
frame->can_id |= CAN_ERR_CRTL;
frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- netif_receive_skb(skb);
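+ /* no RX FIFO element for error frames; fetch a timestamp for rx-offload ordering */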
+ if (cdev->is_peripheral)
+ timestamp = m_can_get_timestamp(cdev);
+
+ m_can_receive_skb(cdev, skb, timestamp);
return 1;
}
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
+ u32 timestamp = 0;
cdev->can.can_stats.bus_error++;
stats->rx_errors++;
stats->rx_packets++;
stats->rx_bytes += cf->len;
- netif_receive_skb(skb);
+
+ if (cdev->is_peripheral)
+ timestamp = m_can_get_timestamp(cdev);
+
+ m_can_receive_skb(cdev, skb, timestamp);
return 1;
}
struct sk_buff *skb;
struct can_berr_counter bec;
unsigned int ecr;
+ u32 timestamp = 0;
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
stats->rx_packets++;
stats->rx_bytes += cf->len;
- netif_receive_skb(skb);
+
+ if (cdev->is_peripheral)
+ timestamp = m_can_get_timestamp(cdev);
+
+ m_can_receive_skb(cdev, skb, timestamp);
return 1;
}
struct m_can_classdev *cdev = netdev_priv(dev);
struct can_frame *cf;
struct sk_buff *skb;
+ u32 timestamp = 0;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
netdev_dbg(dev, "allocation of skb failed\n");
return 0;
}
- netif_receive_skb(skb);
+
+ if (cdev->is_peripheral)
+ timestamp = m_can_get_timestamp(cdev);
+
+ m_can_receive_skb(cdev, skb, timestamp);
return 1;
}
return work_done;
}
+/* Echo tx skb and update net stats. Peripherals use rx-offload for
+ * echo. timestamp is used for peripherals to ensure correct ordering
+ * by rx-offload, and is ignored for non-peripherals.
+ */
+static void m_can_tx_update_stats(struct m_can_classdev *cdev,
+ unsigned int msg_mark,
+ u32 timestamp)
+{
+ struct net_device *dev = cdev->net;
+ struct net_device_stats *stats = &dev->stats;
+
+ if (cdev->is_peripheral)
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&cdev->offload,
+ msg_mark,
+ timestamp,
+ NULL);
+ else
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
+
+ stats->tx_packets++;
+}
+
static void m_can_echo_tx_event(struct net_device *dev)
{
u32 txe_count = 0;
unsigned int msg_mark;
struct m_can_classdev *cdev = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
/* read tx event fifo status */
m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);
/* Get and process all sent elements */
for (i = 0; i < txe_count; i++) {
+ u32 txe, timestamp = 0;
+
/* retrieve get index */
fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK) >>
TXEFS_EFGI_SHIFT;
- /* get message marker */
- msg_mark = (m_can_txe_fifo_read(cdev, fgi, 4) &
- TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;
+ /* get message marker, timestamp */
+ txe = m_can_txe_fifo_read(cdev, fgi, 4);
+ msg_mark = (txe & TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;
+ timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);
/* ack txe element */
m_can_write(cdev, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
(fgi << TXEFA_EFAI_SHIFT)));
/* update stats */
- stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
- stats->tx_packets++;
+ m_can_tx_update_stats(cdev, msg_mark, timestamp);
}
}
{
struct net_device *dev = (struct net_device *)dev_id;
struct m_can_classdev *cdev = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
u32 ir;
if (pm_runtime_suspended(cdev->dev))
if (cdev->version == 30) {
if (ir & IR_TC) {
/* Transmission Complete Interrupt*/
- stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
- stats->tx_packets++;
+ u32 timestamp = 0;
+
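+ /* fetch a fresh timestamp so rx-offload can order the echo skb */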
+ if (cdev->is_peripheral)
+ timestamp = m_can_get_timestamp(cdev);
+ m_can_tx_update_stats(cdev, 0, timestamp);
+
can_led_event(dev, CAN_LED_EVENT_TX);
netif_wake_queue(dev);
}
cdev->tx_wq = NULL;
}
+ if (cdev->is_peripheral)
+ can_rx_offload_disable(&cdev->offload);
+
close_candev(dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
goto exit_disable_clks;
}
+ if (cdev->is_peripheral)
+ can_rx_offload_enable(&cdev->offload);
+
/* register interrupt handler */
if (cdev->is_peripheral) {
cdev->tx_skb = NULL;
if (cdev->is_peripheral)
destroy_workqueue(cdev->tx_wq);
out_wq_fail:
+ if (cdev->is_peripheral)
+ can_rx_offload_disable(&cdev->offload);
close_candev(dev);
exit_disable_clks:
m_can_clk_stop(cdev);
return ret;
}
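+ /* peripherals deliver skbs through rx-offload (see m_can_receive_skb),
+ * so register a manual rx-offload instance for them here
+ */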
+ if (cdev->is_peripheral) {
+ ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
+ M_CAN_NAPI_WEIGHT);
+ if (ret)
+ goto clk_disable;
+ }
+
ret = m_can_dev_setup(cdev);
if (ret)
- goto clk_disable;
+ goto rx_offload_del;
ret = register_m_can_dev(cdev->net);
if (ret) {
dev_err(cdev->dev, "registering %s failed (err=%d)\n",
cdev->net->name, ret);
- goto clk_disable;
+ goto rx_offload_del;
}
devm_can_led_init(cdev->net);
/* Probe finished
* Stop clocks. They will be reactivated once the M_CAN device is opened
*/
+ m_can_clk_stop(cdev);
+
+ return 0;
+
+rx_offload_del:
+ if (cdev->is_peripheral)
+ can_rx_offload_del(&cdev->offload);
clk_disable:
m_can_clk_stop(cdev);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
+ if (cdev->is_peripheral)
+ can_rx_offload_del(&cdev->offload);
unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);